Commit f9fc8cad authored by Peter Zijlstra

sched: Add TASK_ANY for wait_task_inactive()



Now that wait_task_inactive()'s @match_state argument is a mask (like
ttwu()), it is possible to replace the special !match_state case with
an 'all-states' value such that any blocked state will match.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YxhkzfuFTvRnpUaH@hirez.programming.kicks-ass.net
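
To make the simplification concrete, here is an editorial user-space sketch
(not part of the patch): with @match_state as a bit mask, "match any blocked
state" needs no zero special case, because a mask with every state bit set
matches unconditionally. The matches_old()/matches_new() helpers are
hypothetical; the state values mirror include/linux/sched.h.

#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002
#define TASK_STATE_MAX		0x2000
/* Every state bit set: matches whatever blocked state a task is in. */
#define TASK_ANY		(TASK_STATE_MAX-1)

/* Old contract: match_state == 0 was special-cased to mean "any state". */
static int matches_old(unsigned int state, unsigned int match_state)
{
	return !match_state || (state & match_state);
}

/* New contract: a plain mask test; TASK_ANY expresses the "any" case. */
static int matches_new(unsigned int state, unsigned int match_state)
{
	return !!(state & match_state);
}

int main(void)
{
	unsigned int state = TASK_UNINTERRUPTIBLE;

	/* Both spellings agree and print: 1 1 */
	printf("%d %d\n", matches_old(state, 0),
			  matches_new(state, TASK_ANY));
	return 0;
}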
parent 9204a97f
drivers/powercap/idle_inject.c +1 −1
@@ -254,7 +254,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
 		iit = per_cpu_ptr(&idle_inject_thread, cpu);
 		iit->should_run = 0;
 
-		wait_task_inactive(iit->tsk, 0);
+		wait_task_inactive(iit->tsk, TASK_ANY);
 	}
 
 	cpu_hotplug_enable();
fs/coredump.c +1 −1
@@ -412,7 +412,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 		 */
 		ptr = core_state->dumper.next;
 		while (ptr != NULL) {
-			wait_task_inactive(ptr->task, 0);
+			wait_task_inactive(ptr->task, TASK_ANY);
 			ptr = ptr->next;
 		}
 	}
include/linux/sched.h +2 −0
@@ -101,6 +101,8 @@ struct task_group;
 #define TASK_RTLOCK_WAIT		0x1000
 #define TASK_STATE_MAX			0x2000
 
+#define TASK_ANY			(TASK_STATE_MAX-1)
+
 /* Convenience macros for the sake of set_current_state: */
 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
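
Since TASK_STATE_MAX is 0x2000, (TASK_STATE_MAX-1) evaluates to 0x1fff: every
bit below TASK_STATE_MAX is set, so TASK_ANY is a superset of each state bit
defined above it. A standalone sanity check (editorial sketch, not part of
the patch):

#include <assert.h>

#define TASK_RTLOCK_WAIT	0x1000
#define TASK_STATE_MAX		0x2000
#define TASK_ANY		(TASK_STATE_MAX-1)

int main(void)
{
	/* 0x2000 - 1 == 0x1fff: bits 0..12 are all set. */
	assert(TASK_ANY == 0x1fff);
	assert((TASK_ANY & TASK_RTLOCK_WAIT) == TASK_RTLOCK_WAIT);
	return 0;
}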
kernel/sched/core.c +8 −8
@@ -3253,12 +3253,12 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
- * If @match_state is nonzero, it's the @p->state value just checked and
- * not expected to change.  If it changes, i.e. @p might have woken up,
- * then return zero.  When we succeed in waiting for @p to be off its CPU,
- * we return a positive number (its total switch count).  If a second call
- * a short while later returns the same number, the caller can be sure that
- * @p has remained unscheduled the whole time.
+ * Wait for the thread to block in any of the states set in @match_state.
+ * If it changes, i.e. @p might have woken up, then return zero.  When we
+ * succeed in waiting for @p to be off its CPU, we return a positive number
+ * (its total switch count).  If a second call a short while later returns the
+ * same number, the caller can be sure that @p has remained unscheduled the
+ * whole time.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
@@ -3294,7 +3294,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
 		 * is actually now running somewhere else!
 		 */
 		while (task_on_cpu(rq, p)) {
-			if (match_state && !(READ_ONCE(p->__state) & match_state))
+			if (!(READ_ONCE(p->__state) & match_state))
 				return 0;
 			cpu_relax();
 		}
@@ -3309,7 +3309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
 		running = task_on_cpu(rq, p);
 		queued = task_on_rq_queued(p);
 		ncsw = 0;
-		if (!match_state || (READ_ONCE(p->__state) & match_state))
+		if (READ_ONCE(p->__state) & match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
 		task_rq_unlock(rq, p, &rf);
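
The rewritten comment keeps the switch-count contract: a nonzero return is
@p's context-switch count, and an unchanged count across two calls means the
task never ran in between. A hedged, kernel-style sketch of a caller relying
on that contract (task_stayed_off_cpu() is a hypothetical helper, not part of
the patch):

static bool task_stayed_off_cpu(struct task_struct *p)
{
	unsigned long ncsw, ncsw2;

	/* Nonzero: @p is off its CPU; the value is its switch count. */
	ncsw = wait_task_inactive(p, TASK_ANY);
	if (!ncsw)
		return false;	/* @p woke up / left the matched states */

	/* ... work that must not race with @p running ... */

	ncsw2 = wait_task_inactive(p, TASK_ANY);
	return ncsw2 == ncsw;	/* same count: @p never ran in between */
}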