Commit 9cfc3e18 authored by Peter Zijlstra

sched: Massage set_cpus_allowed()



Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow adding behavioural tweaks for future users.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102346.729082820@infradead.org
parent 120455c5
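For illustration, a minimal self-contained sketch of the pattern this patch applies: a single bool parameter is replaced by a u32 flags word, so the call chain can grow new behaviours without further signature churn. Only SCA_CHECK is taken from the diff below; check_and_set() and the values it tests are hypothetical stand-ins, not kernel code:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;	/* mirror the kernel's u32 in userspace */

#define SCA_CHECK	0x01	/* the one flag this commit defines */

/* Hypothetical stand-in for __set_cpus_allowed_ptr(): the old
 * 'bool check' argument becomes a bit in 'u32 flags'. */
static int check_and_set(int forbidden, u32 flags)
{
	if ((flags & SCA_CHECK) && forbidden)
		return -1;	/* was: if (check && ...) */
	/* ... the actual work would happen here ... */
	return 0;
}

int main(void)
{
	/* Callers that passed 'false' now pass 0 ... */
	printf("%d\n", check_and_set(1, 0));		/* 0: check skipped */
	/* ... callers that passed 'true' pass a named flag. */
	printf("%d\n", check_and_set(1, SCA_CHECK));	/* -1: rejected */
	return 0;
}

The trade-off is the same one the commit message names: a flags word costs nothing today, and the next behavioural tweak becomes a new SCA_* bit instead of another signature change at every caller.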
kernel/sched/core.c +18 −10
@@ -1824,13 +1824,14 @@ static int migration_cpu_stop(void *data)
  * sched_class::set_cpus_allowed must do the below, but is not required to
  * actually call this function.
  */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	cpumask_copy(&p->cpus_mask, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1851,7 +1852,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	if (running)
 		put_prev_task(rq, p);
 
-	p->sched_class->set_cpus_allowed(p, new_mask);
+	p->sched_class->set_cpus_allowed(p, new_mask, flags);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -1859,6 +1860,11 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 		set_next_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	__do_set_cpus_allowed(p, new_mask, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1869,7 +1875,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  * call is not atomic; no spinlocks may be held.
  */
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, bool check)
+				  const struct cpumask *new_mask,
+				  u32 flags)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
@@ -1891,7 +1898,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
 	 */
-	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1910,7 +1917,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 	}
 
-	do_set_cpus_allowed(p, new_mask);
+	__do_set_cpus_allowed(p, new_mask, flags);
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -1947,7 +1954,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-	return __set_cpus_allowed_ptr(p, new_mask, false);
+	return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
@@ -2406,7 +2413,8 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
 #else
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask, bool check)
+					 const struct cpumask *new_mask,
+					 u32 flags)
 {
 	return set_cpus_allowed_ptr(p, new_mask);
 }
@@ -6006,7 +6014,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -6590,7 +6598,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	 *
 	 * And since this is boot we can forgo the serialization.
 	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu));
+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
 #endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
kernel/sched/deadline.c +3 −2
@@ -2301,7 +2301,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask)
+				const struct cpumask *new_mask,
+				u32 flags)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2330,7 +2331,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask);
+	set_cpus_allowed_common(p, new_mask, flags);
 }
 
 /* Assumes rq->lock is held */
kernel/sched/sched.h +5 −2
@@ -1814,7 +1814,8 @@ struct sched_class {
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
+				 const struct cpumask *newmask,
+				 u32 flags);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1907,7 +1908,9 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+#define SCA_CHECK		0x01
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
 
 #endif