Commit 0b9d46fc authored by Peter Zijlstra

sched: Rename task_running() to task_on_cpu()



There is some ambiguity about task_running() in that it is unrelated
to TASK_RUNNING but instead tests ->on_cpu. As such, rename the thing
task_on_cpu().

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/Yxhkhn55uHZx+NGl@hirez.programming.kicks-ass.net
parent 96c1c0cf
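
The definition itself is renamed in kernel/sched/sched.h (that hunk is not shown in this view). For reference, a minimal sketch of the helper after this commit, assuming its body is unchanged from the old task_running():

	static inline int task_on_cpu(struct rq *rq, struct task_struct *p)
	{
	#ifdef CONFIG_SMP
		/* ->on_cpu is set while p is actually executing on a CPU. */
		return p->on_cpu;
	#else
		/* On UP there is no ->on_cpu; the task on the CPU is rq->curr. */
		return task_current(rq, p);
	#endif
	}

Note that the helper tests whether p is physically on a CPU right now; this is independent of p->__state being TASK_RUNNING, since a TASK_RUNNING task may sit queued on a runqueue without currently executing. That mismatch is exactly the ambiguity the rename resolves.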
kernel/sched/core.c  +5 −5
@@ -2777,7 +2777,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
		return -EINVAL;
	}

-	if (task_running(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
+	if (task_on_cpu(rq, p) || READ_ONCE(p->__state) == TASK_WAKING) {
		/*
		 * MIGRATE_ENABLE gets here because 'p == current', but for
		 * anything else we cannot do is_migration_disabled(), punt
@@ -3289,11 +3289,11 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
		 *
		 * NOTE! Since we don't hold any locks, it's not
		 * even sure that "rq" stays as the right runqueue!
-		 * But we don't care, since "task_running()" will
+		 * But we don't care, since "task_on_cpu()" will
		 * return false if the runqueue has changed and p
		 * is actually now running somewhere else!
		 */
-		while (task_running(rq, p)) {
+		while (task_on_cpu(rq, p)) {
			if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
				return 0;
			cpu_relax();
@@ -3306,7 +3306,7 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
		 */
		rq = task_rq_lock(p, &rf);
		trace_sched_wait_task(p);
-		running = task_running(rq, p);
+		running = task_on_cpu(rq, p);
		queued = task_on_rq_queued(p);
		ncsw = 0;
		if (!match_state || READ_ONCE(p->__state) == match_state)
@@ -8648,7 +8648,7 @@ int __sched yield_to(struct task_struct *p, bool preempt)
	if (curr->sched_class != p->sched_class)
		goto out_unlock;

-	if (task_running(p_rq, p) || !task_is_running(p))
+	if (task_on_cpu(p_rq, p) || !task_is_running(p))
		goto out_unlock;

	yielded = curr->sched_class->yield_to_task(rq, p);
kernel/sched/core_sched.c  +1 −1
@@ -88,7 +88,7 @@ static unsigned long sched_core_update_cookie(struct task_struct *p,
	 * core has now entered/left forced idle state. Defer accounting to the
	 * next scheduling edge, rather than always forcing a reschedule here.
	 */
-	if (task_running(rq, p))
+	if (task_on_cpu(rq, p))
		resched_curr(rq);

	task_rq_unlock(rq, p, &rf);
kernel/sched/deadline.c  +3 −3
@@ -2092,7 +2092,7 @@ static void task_fork_dl(struct task_struct *p)

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
-	if (!task_running(rq, p) &&
+	if (!task_on_cpu(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;
	return 0;
@@ -2244,7 +2244,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
-				     task_running(rq, task) ||
+				     task_on_cpu(rq, task) ||
				     !dl_task(task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
@@ -2474,7 +2474,7 @@ static void pull_dl_task(struct rq *this_rq)
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
-	if (!task_running(rq, p) &&
+	if (!task_on_cpu(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
kernel/sched/fair.c  +1 −1
@@ -7935,7 +7935,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
	/* Record that we found at least one task that could run on dst_cpu */
	env->flags &= ~LBF_ALL_PINNED;

-	if (task_running(env->src_rq, p)) {
+	if (task_on_cpu(env->src_rq, p)) {
		schedstat_inc(p->stats.nr_failed_migrations_running);
		return 0;
	}
kernel/sched/rt.c  +3 −3
@@ -1845,7 +1845,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
-	if (!task_running(rq, p) &&
+	if (!task_on_cpu(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;

@@ -2000,7 +2000,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
-				     task_running(rq, task) ||
+				     task_on_cpu(rq, task) ||
				     !rt_task(task) ||
				     !task_on_rq_queued(task))) {

@@ -2458,7 +2458,7 @@ static void pull_rt_task(struct rq *this_rq)
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
-	bool need_to_push = !task_running(rq, p) &&
+	bool need_to_push = !task_on_cpu(rq, p) &&
			    !test_tsk_need_resched(rq->curr) &&
			    p->nr_cpus_allowed > 1 &&
			    (dl_task(rq->curr) || rt_task(rq->curr)) &&