kernel/workqueue.c (+21 −1)

@@ -864,8 +864,17 @@ void wq_worker_running(struct task_struct *task)
 
 	if (!worker->sleeping)
 		return;
+
+	/*
+	 * If preempted by unbind_workers() between the WORKER_NOT_RUNNING check
+	 * and the nr_running increment below, we may ruin the nr_running reset
+	 * and leave with an unexpected pool->nr_running == 1 on the newly unbound
+	 * pool. Protect against such race.
+	 */
+	preempt_disable();
 	if (!(worker->flags & WORKER_NOT_RUNNING))
 		atomic_inc(&worker->pool->nr_running);
+	preempt_enable();
 	worker->sleeping = 0;
 }
 
@@ -898,6 +907,16 @@ void wq_worker_sleeping(struct task_struct *task)
 	worker->sleeping = 1;
 	raw_spin_lock_irq(&pool->lock);
 
+	/*
+	 * Recheck in case unbind_workers() preempted us. We don't
+	 * want to decrement nr_running after the worker is unbound
+	 * and nr_running has been reset.
+	 */
+	if (worker->flags & WORKER_NOT_RUNNING) {
+		raw_spin_unlock_irq(&pool->lock);
+		return;
+	}
+
 	/*
 	 * The counterpart of the following dec_and_test, implied mb,
 	 * worklist not empty test sequence is in insert_work().
@@ -1526,7 +1545,8 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
  * @work: work to queue
  *
  * We queue the work to a specific CPU, the caller must ensure it
- * can't go away.
+ * can't go away. Callers that fail to ensure that the specified
+ * CPU cannot go away will execute on a randomly chosen CPU.
  *
  * Return: %false if @work was already on a queue, %true otherwise.
  */
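The two wq_worker_running()/wq_worker_sleeping() hunks close the same race from both ends. To see the window the first hunk closes, here is a minimal user-space sketch; every name in it (fake_pool, fake_worker, FAKE_NOT_RUNNING, racy_worker_running) is a hypothetical stand-in for the kernel's worker-pool state, and the preemption by unbind_workers() is simulated inline so the bad interleaving is deterministic rather than scheduler-dependent.

#include <stdatomic.h>
#include <stdio.h>

#define FAKE_NOT_RUNNING 0x1	/* hypothetical stand-in for WORKER_NOT_RUNNING */

struct fake_pool {
	atomic_int nr_running;
};

struct fake_worker {
	atomic_int flags;
	struct fake_pool *pool;
};

/*
 * Models the pre-fix wq_worker_running(): the flag check and the
 * increment are not one atomic unit, so an unbind sneaking into the
 * gap between them leaves a stale increment behind.
 */
static void racy_worker_running(struct fake_worker *worker, int preempted)
{
	if (!(atomic_load(&worker->flags) & FAKE_NOT_RUNNING)) {
		if (preempted) {
			/* the unbind_workers() equivalent runs in the window */
			atomic_fetch_or(&worker->flags, FAKE_NOT_RUNNING);
			atomic_store(&worker->pool->nr_running, 0);
		}
		atomic_fetch_add(&worker->pool->nr_running, 1);
	}
}

int main(void)
{
	struct fake_pool pool;
	struct fake_worker worker;

	atomic_init(&pool.nr_running, 0);
	atomic_init(&worker.flags, 0);
	worker.pool = &pool;

	racy_worker_running(&worker, 1);

	/* Prints 1: the now-unbound pool is left with nr_running == 1. */
	printf("nr_running after unbind: %d\n",
	       atomic_load(&pool.nr_running));
	return 0;
}

The patch's preempt_disable()/preempt_enable() pair makes the check and the increment a single non-preemptible unit, so unbind_workers() can no longer slip into that window.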
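The sleep-side hunk takes the other approach: instead of disabling preemption, it rechecks the flag after taking pool->lock, which unbind_workers() also holds while resetting nr_running. Below is a standalone sketch of that recheck-under-lock pattern, reusing the same hypothetical names as above, with a pthread_mutex_t standing in for the pool's raw spinlock (build with cc -pthread).

#include <pthread.h>
#include <stdatomic.h>

#define FAKE_NOT_RUNNING 0x1	/* hypothetical, as above */

struct fake_pool {
	pthread_mutex_t lock;	/* plays the role of pool->lock */
	atomic_int nr_running;
};

struct fake_worker {
	atomic_int flags;
	struct fake_pool *pool;
};

/*
 * Models the fixed wq_worker_sleeping(): after taking the pool lock,
 * recheck the flag before decrementing. The unbind path sets the flag
 * and resets nr_running under the same lock, so a worker that lost the
 * race observes the flag here and backs out instead of pushing the
 * freshly reset counter negative.
 */
static void fixed_worker_sleeping(struct fake_worker *worker)
{
	struct fake_pool *pool = worker->pool;

	pthread_mutex_lock(&pool->lock);
	if (atomic_load(&worker->flags) & FAKE_NOT_RUNNING) {
		pthread_mutex_unlock(&pool->lock);
		return;
	}
	atomic_fetch_sub(&pool->nr_running, 1);
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct fake_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_worker worker = { .pool = &pool };

	atomic_init(&pool.nr_running, 0);	/* as if an unbind just reset it */
	atomic_init(&worker.flags, FAKE_NOT_RUNNING);

	fixed_worker_sleeping(&worker);		/* bails out; counter stays 0 */
	return 0;
}

The third hunk is documentation only: it spells out what queue_work_on() already did when the caller's CPU guarantee is violated, so no code change accompanies it.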