kernel/workqueue.c +21 −12

@@ -1976,6 +1976,23 @@ static struct worker *create_worker(struct worker_pool *pool)
 	return NULL;
 }
 
+static void unbind_worker(struct worker *worker)
+{
+	lockdep_assert_held(&wq_pool_attach_mutex);
+
+	kthread_set_per_cpu(worker->task, -1);
+	if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+	else
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+}
+
+static void rebind_worker(struct worker *worker, struct worker_pool *pool)
+{
+	kthread_set_per_cpu(worker->task, pool->cpu);
+	WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
+}
+
 /**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
@@ -5051,13 +5068,8 @@ static void unbind_workers(int cpu)
 
 		raw_spin_unlock_irq(&pool->lock);
 
-		for_each_pool_worker(worker, pool) {
-			kthread_set_per_cpu(worker->task, -1);
-			if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
-				WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
-			else
-				WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-		}
+		for_each_pool_worker(worker, pool)
+			unbind_worker(worker);
 
 		mutex_unlock(&wq_pool_attach_mutex);
 	}
@@ -5082,11 +5094,8 @@ static void rebind_workers(struct worker_pool *pool)
 	 * of all workers first and then clear UNBOUND.  As we're called
 	 * from CPU_ONLINE, the following shouldn't fail.
 	 */
-	for_each_pool_worker(worker, pool) {
-		kthread_set_per_cpu(worker->task, pool->cpu);
-		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-						  pool->attrs->cpumask) < 0);
-	}
+	for_each_pool_worker(worker, pool)
+		rebind_worker(worker, pool);
 
 	raw_spin_lock_irq(&pool->lock);
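The patch lifts the duplicated per-worker affinity logic out of unbind_workers() and rebind_workers() into two helpers, and the new lockdep_assert_held() makes explicit that unbind_worker() must be called with wq_pool_attach_mutex held. One subtlety worth noting is the fallback branch: when wq_unbound_cpumask no longer intersects cpu_active_mask (e.g. the preferred CPUs went offline), unbind_worker() retargets the task at cpu_possible_mask instead, since handing set_cpus_allowed_ptr() a mask with no active CPU would leave the task nowhere valid to run. Below is a minimal, runnable userspace sketch of that fallback pattern; pick_unbind_mask() and masks_intersect() are hypothetical illustrative names, not kernel APIs, and the uint64_t bitmask stands in for a real struct cpumask.

/* Userspace analogue of the unbind_worker() mask selection: prefer the
 * unbound mask, but fall back to the full possible mask when the unbound
 * mask no longer intersects the active CPUs. Illustrative only. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t cpumask_t;	/* one bit per CPU; enough for a sketch */

static int masks_intersect(cpumask_t a, cpumask_t b)
{
	return (a & b) != 0;
}

static cpumask_t pick_unbind_mask(cpumask_t unbound, cpumask_t active,
				  cpumask_t possible)
{
	/* Mirrors: if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask)) */
	return masks_intersect(unbound, active) ? unbound : possible;
}

int main(void)
{
	cpumask_t possible = 0xF;	/* CPUs 0-3 exist */
	cpumask_t unbound  = 0x8;	/* policy prefers CPU 3 */

	/* CPU 3 still active: the preferred unbound mask is usable. */
	printf("active=0xf -> mask=%#llx\n",
	       (unsigned long long)pick_unbind_mask(unbound, 0xF, possible));

	/* CPU 3 went offline: fall back to the possible mask. */
	printf("active=0x7 -> mask=%#llx\n",
	       (unsigned long long)pick_unbind_mask(unbound, 0x7, possible));
	return 0;
}

Factoring the loop bodies into unbind_worker()/rebind_worker() also keeps the two hotplug paths from drifting apart: any future change to how a worker's affinity is torn down or restored now happens in exactly one place.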