Commit 78383162 authored by Linus Torvalds
Pull workqueue updates from Tejun Heo:
 "Nothing major. Just follow-up cleanups from Lai after the earlier
  synchronization simplification"

* 'for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Convert the type of pool->nr_running to int
  workqueue: Use wake_up_worker() in wq_worker_sleeping() instead of open code
  workqueue: Change the comments of the synchronization about the idle_list
  workqueue: Remove the mb() pair between wq_worker_sleeping() and insert_work()
parents c5c009e2 bc35f7ef
kernel/workqueue.c +20 −38
@@ -154,15 +154,20 @@ struct worker_pool {

	unsigned long		watchdog_ts;	/* L: watchdog timestamp */

-	/* The current concurrency level. */
-	atomic_t		nr_running;
+	/*
+	 * The counter is incremented in a process context on the associated CPU
+	 * w/ preemption disabled, and decremented or reset in the same context
+	 * but w/ pool->lock held. The readers grab pool->lock and are
+	 * guaranteed to see if the counter reached zero.
+	 */
+	int			nr_running;

	struct list_head	worklist;	/* L: list of pending works */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle workers */

-	struct list_head	idle_list;	/* X: list of idle workers */
+	struct list_head	idle_list;	/* L: list of idle workers */
	struct timer_list	idle_timer;	/* L: worker idle timeout */
	struct timer_list	mayday_timer;	/* L: SOS timer for workers */

@@ -777,7 +782,7 @@ static bool work_is_canceling(struct work_struct *work)

static bool __need_more_worker(struct worker_pool *pool)
{
-	return !atomic_read(&pool->nr_running);
+	return !pool->nr_running;
}

/*
@@ -802,8 +807,7 @@ static bool may_start_working(struct worker_pool *pool)
/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
-	return !list_empty(&pool->worklist) &&
-		atomic_read(&pool->nr_running) <= 1;
+	return !list_empty(&pool->worklist) && (pool->nr_running <= 1);
}

/* Do we need a new worker?  Called from manager. */
@@ -826,7 +830,7 @@ static bool too_many_workers(struct worker_pool *pool)
 * Wake up functions.
 */

-/* Return the first idle worker.  Safe with preemption disabled */
+/* Return the first idle worker.  Called with pool->lock held. */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
	if (unlikely(list_empty(&pool->idle_list)))
@@ -873,7 +877,7 @@ void wq_worker_running(struct task_struct *task)
	 */
	preempt_disable();
	if (!(worker->flags & WORKER_NOT_RUNNING))
-		atomic_inc(&worker->pool->nr_running);
+		worker->pool->nr_running++;
	preempt_enable();
	worker->sleeping = 0;
}
@@ -887,7 +891,7 @@ void wq_worker_running(struct task_struct *task)
 */
void wq_worker_sleeping(struct task_struct *task)
{
-	struct worker *next, *worker = kthread_data(task);
+	struct worker *worker = kthread_data(task);
	struct worker_pool *pool;

	/*
@@ -917,23 +921,9 @@ void wq_worker_sleeping(struct task_struct *task)
		return;
	}

-	/*
-	 * The counterpart of the following dec_and_test, implied mb,
-	 * worklist not empty test sequence is in insert_work().
-	 * Please read comment there.
-	 *
-	 * NOT_RUNNING is clear.  This means that we're bound to and
-	 * running on the local cpu w/ rq lock held and preemption
-	 * disabled, which in turn means that none else could be
-	 * manipulating idle_list, so dereferencing idle_list without pool
-	 * lock is safe.
-	 */
-	if (atomic_dec_and_test(&pool->nr_running) &&
-	    !list_empty(&pool->worklist)) {
-		next = first_idle_worker(pool);
-		if (next)
-			wake_up_process(next->task);
-	}
+	pool->nr_running--;
+	if (need_more_worker(pool))
+		wake_up_worker(pool);
	raw_spin_unlock_irq(&pool->lock);
}

@@ -987,7 +977,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
	/* If transitioning into NOT_RUNNING, adjust nr_running. */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		atomic_dec(&pool->nr_running);
+		pool->nr_running--;
	}

	worker->flags |= flags;
@@ -1019,7 +1009,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
	 */
	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
		if (!(worker->flags & WORKER_NOT_RUNNING))
-			atomic_inc(&pool->nr_running);
+			pool->nr_running++;
}

/**
@@ -1372,13 +1362,6 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
	list_add_tail(&work->entry, head);
	get_pwq(pwq);

-	/*
-	 * Ensure either wq_worker_sleeping() sees the above
-	 * list_add_tail() or we see zero nr_running to avoid workers lying
-	 * around lazily while there are works to be processed.
-	 */
-	smp_mb();
-
	if (__need_more_worker(pool))
		wake_up_worker(pool);
}
@@ -1827,8 +1810,7 @@ static void worker_enter_idle(struct worker *worker)
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/* Sanity check nr_running. */
-	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle &&
-		     atomic_read(&pool->nr_running));
+	WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
}

/**
@@ -5006,7 +4988,7 @@ static void unbind_workers(int cpu)
		 * an unbound (in terms of concurrency management) pool which
		 * are served by workers tied to the pool.
		 */
-		atomic_set(&pool->nr_running, 0);
+		pool->nr_running = 0;

		/*
		 * With concurrency management just turned off, a busy
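
The new comment on pool->nr_running and the reworked tail of wq_worker_sleeping() boil down to one rule: when a running worker blocks, the concurrency level drops, and if work is still pending with nothing left running, an idle worker is woken. Below is a minimal single-threaded sketch of that bookkeeping, not the kernel code itself; the struct layout, the nr_pending/nr_idle counters standing in for the worklist and idle_list, and the scenario in main() are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the concurrency-management bookkeeping touched by this
 * series.  The helper names mirror the kernel's, everything else is
 * invented for illustration.
 */
struct pool {
	int nr_running;		/* concurrency level, now a plain int */
	int nr_pending;		/* pending work items */
	int nr_idle;		/* idle workers available to wake */
};

/* Work is pending and nothing is running: someone must be woken. */
static bool need_more_worker(struct pool *pool)
{
	return pool->nr_pending > 0 && pool->nr_running == 0;
}

/* Shape of the reworked wq_worker_sleeping() tail: drop the count, wake. */
static void worker_sleeping(struct pool *pool)
{
	pool->nr_running--;
	if (need_more_worker(pool) && pool->nr_idle > 0) {
		pool->nr_idle--;
		printf("waking an idle worker\n");
		/*
		 * In the kernel, the woken worker raises nr_running again
		 * from wq_worker_running() once it actually starts running.
		 */
	}
}

int main(void)
{
	struct pool pool = { .nr_running = 1, .nr_pending = 2, .nr_idle = 1 };

	worker_sleeping(&pool);	/* the only running worker blocks */
	printf("nr_running=%d nr_idle=%d\n", pool.nr_running, pool.nr_idle);
	return 0;
}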