Commit 713a2e21 authored by Waiman Long, committed by Peter Zijlstra

sched: Introduce affinity_context

In order to prepare for passing additional data through the
affinity call-chains, convert the mask and flags arguments into a
structure.

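As an illustration, the shape of the conversion (a condensed sketch
distilled from the diff below; see the struct added to
kernel/sched/sched.h and converted callers such as init_idle()):

	struct affinity_context {
		const struct cpumask *new_mask;
		unsigned int flags;
	};

	/* Before: mask and flags were passed separately. */
	set_cpus_allowed_common(p, cpumask_of(cpu), 0);

	/* After: callers build a context on the stack and pass a
	 * pointer, so additional fields can later be added without
	 * touching every signature in the call-chain.
	 */
	struct affinity_context ac = {
		.new_mask  = cpumask_of(cpu),
		.flags     = 0,
	};
	set_cpus_allowed_common(p, &ac);
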
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220922180041.1768141-5-longman@redhat.com
parent 5584e8ac
kernel/sched/core.c: +75 −39
@@ -2189,14 +2189,18 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
#ifdef CONFIG_SMP

static void
-__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx);

static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask,
-				  u32 flags);
+				  struct affinity_context *ctx);

static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
{
+	struct affinity_context ac = {
+		.new_mask  = cpumask_of(rq->cpu),
+		.flags     = SCA_MIGRATE_DISABLE,
+	};
+
	if (likely(!p->migration_disabled))
		return;

@@ -2206,7 +2210,7 @@ static void migrate_disable_switch(struct rq *rq, struct task_struct *p)
	/*
	 * Violates locking rules! see comment in __do_set_cpus_allowed().
	 */
-	__do_set_cpus_allowed(p, cpumask_of(rq->cpu), SCA_MIGRATE_DISABLE);
+	__do_set_cpus_allowed(p, &ac);
}

void migrate_disable(void)
@@ -2228,6 +2232,10 @@ EXPORT_SYMBOL_GPL(migrate_disable);
void migrate_enable(void)
{
	struct task_struct *p = current;
+	struct affinity_context ac = {
+		.new_mask  = &p->cpus_mask,
+		.flags     = SCA_MIGRATE_ENABLE,
+	};

	if (p->migration_disabled > 1) {
		p->migration_disabled--;
@@ -2243,7 +2251,7 @@ void migrate_enable(void)
	 */
	preempt_disable();
	if (p->cpus_ptr != &p->cpus_mask)
-		__set_cpus_allowed_ptr(p, &p->cpus_mask, SCA_MIGRATE_ENABLE);
+		__set_cpus_allowed_ptr(p, &ac);
	/*
	 * Mustn't clear migration_disabled() until cpus_ptr points back at the
	 * regular cpus_mask, otherwise things that race (eg.
@@ -2523,19 +2531,19 @@ int push_cpu_stop(void *arg)
 * sched_class::set_cpus_allowed must do the below, but is not required to
 * actually call this function.
 */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx)
{
-	if (flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
-		p->cpus_ptr = new_mask;
+	if (ctx->flags & (SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) {
+		p->cpus_ptr = ctx->new_mask;
		return;
	}

-	cpumask_copy(&p->cpus_mask, new_mask);
-	p->nr_cpus_allowed = cpumask_weight(new_mask);
+	cpumask_copy(&p->cpus_mask, ctx->new_mask);
+	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
}

static void
-__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
+__do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
{
	struct rq *rq = task_rq(p);
	bool queued, running;
@@ -2552,7 +2560,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
	 *
	 * XXX do further audits, this smells like something putrid.
	 */
-	if (flags & SCA_MIGRATE_DISABLE)
+	if (ctx->flags & SCA_MIGRATE_DISABLE)
		SCHED_WARN_ON(!p->on_cpu);
	else
		lockdep_assert_held(&p->pi_lock);
@@ -2571,7 +2579,7 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32
	if (running)
		put_prev_task(rq, p);

-	p->sched_class->set_cpus_allowed(p, new_mask, flags);
+	p->sched_class->set_cpus_allowed(p, ctx);

	if (queued)
		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
@@ -2581,7 +2589,12 @@ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32

void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
-	__do_set_cpus_allowed(p, new_mask, 0);
+	struct affinity_context ac = {
+		.new_mask  = new_mask,
+		.flags     = 0,
+	};
+
+	__do_set_cpus_allowed(p, &ac);
}

int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
@@ -2834,8 +2847,7 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 * Called with both p->pi_lock and rq->lock held; drops both before returning.
 */
static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
-					 const struct cpumask *new_mask,
-					 u32 flags,
+					 struct affinity_context *ctx,
					 struct rq *rq,
					 struct rq_flags *rf)
	__releases(rq->lock)
@@ -2864,7 +2876,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
		cpu_valid_mask = cpu_online_mask;
	}

-	if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+	if (!kthread && !cpumask_subset(ctx->new_mask, cpu_allowed_mask)) {
		ret = -EINVAL;
		goto out;
	}
@@ -2873,18 +2885,18 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
	 * Must re-check here, to close a race against __kthread_bind(),
	 * sched_setaffinity() is not guaranteed to observe the flag.
	 */
-	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((ctx->flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
		ret = -EINVAL;
		goto out;
	}

-	if (!(flags & SCA_MIGRATE_ENABLE)) {
-		if (cpumask_equal(&p->cpus_mask, new_mask))
+	if (!(ctx->flags & SCA_MIGRATE_ENABLE)) {
+		if (cpumask_equal(&p->cpus_mask, ctx->new_mask))
			goto out;

		if (WARN_ON_ONCE(p == current &&
				 is_migration_disabled(p) &&
-				 !cpumask_test_cpu(task_cpu(p), new_mask))) {
+				 !cpumask_test_cpu(task_cpu(p), ctx->new_mask))) {
			ret = -EBUSY;
			goto out;
		}
@@ -2895,18 +2907,18 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
	 * for groups of tasks (ie. cpuset), so that load balancing is not
	 * immediately required to distribute the tasks within their new mask.
	 */
-	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, new_mask);
+	dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
	if (dest_cpu >= nr_cpu_ids) {
		ret = -EINVAL;
		goto out;
	}

-	__do_set_cpus_allowed(p, new_mask, flags);
+	__do_set_cpus_allowed(p, ctx);

-	if (flags & SCA_USER)
+	if (ctx->flags & SCA_USER)
		user_mask = clear_user_cpus_ptr(p);

-	ret = affine_move_task(rq, p, rf, dest_cpu, flags);
+	ret = affine_move_task(rq, p, rf, dest_cpu, ctx->flags);

	kfree(user_mask);

@@ -2928,18 +2940,23 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 * call is not atomic; no spinlocks may be held.
 */
static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, u32 flags)
+				  struct affinity_context *ctx)
{
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(p, &rf);
-	return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, &rf);
+	return __set_cpus_allowed_ptr_locked(p, ctx, rq, &rf);
}

int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
-	return __set_cpus_allowed_ptr(p, new_mask, 0);
+	struct affinity_context ac = {
+		.new_mask  = new_mask,
+		.flags     = 0,
+	};
+
+	return __set_cpus_allowed_ptr(p, &ac);
}
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

@@ -2955,6 +2972,7 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
				     const struct cpumask *subset_mask)
{
	struct cpumask *user_mask = NULL;
+	struct affinity_context ac;
	struct rq_flags rf;
	struct rq *rq;
	int err;
@@ -2991,7 +3009,11 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
		p->user_cpus_ptr = user_mask;
	}

-	return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);
+	ac = (struct affinity_context){
+		.new_mask = new_mask,
+	};
+
+	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);

err_unlock:
	task_rq_unlock(rq, p, &rf);
@@ -3045,7 +3067,7 @@ void force_compatible_cpus_allowed_ptr(struct task_struct *p)
}

static int
-__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);

/*
 * Restore the affinity of a task @p which was previously restricted by a
@@ -3058,6 +3080,9 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
{
	struct cpumask *user_mask = p->user_cpus_ptr;
+	struct affinity_context ac = {
+		.new_mask  = user_mask,
+	};
	unsigned long flags;

	/*
@@ -3065,7 +3090,7 @@ void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
	 * we free the mask explicitly to avoid it being inherited across
	 * a subsequent fork().
	 */
-	if (!user_mask || !__sched_setaffinity(p, user_mask))
+	if (!user_mask || !__sched_setaffinity(p, &ac))
		return;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -3550,10 +3575,9 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
#else /* CONFIG_SMP */

static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask,
-					 u32 flags)
+					 struct affinity_context *ctx)
{
-	return set_cpus_allowed_ptr(p, new_mask);
+	return set_cpus_allowed_ptr(p, ctx->new_mask);
}

static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
@@ -8090,7 +8114,7 @@ int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
#endif

static int
-__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
+__sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
{
	int retval;
	cpumask_var_t cpus_allowed, new_mask;
@@ -8104,13 +8128,16 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
	}

	cpuset_cpus_allowed(p, cpus_allowed);
-	cpumask_and(new_mask, mask, cpus_allowed);
+	cpumask_and(new_mask, ctx->new_mask, cpus_allowed);
+
+	ctx->new_mask = new_mask;
+	ctx->flags |= SCA_CHECK;

	retval = dl_task_check_affinity(p, new_mask);
	if (retval)
		goto out_free_new_mask;
again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+	retval = __set_cpus_allowed_ptr(p, ctx);
	if (retval)
		goto out_free_new_mask;

@@ -8133,6 +8160,9 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)

long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
+	struct affinity_context ac = {
+		.new_mask = in_mask,
+	};
	struct task_struct *p;
	int retval;

@@ -8167,7 +8197,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
	if (retval)
		goto out_put_task;

-	retval = __sched_setaffinity(p, in_mask);
+	retval = __sched_setaffinity(p, &ac);
out_put_task:
	put_task_struct(p);
	return retval;
@@ -8948,6 +8978,12 @@ void show_state_filter(unsigned int state_filter)
 */
void __init init_idle(struct task_struct *idle, int cpu)
{
+#ifdef CONFIG_SMP
+	struct affinity_context ac = (struct affinity_context) {
+		.new_mask  = cpumask_of(cpu),
+		.flags     = 0,
+	};
+#endif
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

@@ -8972,7 +9008,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
	 *
	 * And since this is boot we can forgo the serialization.
	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
+	set_cpus_allowed_common(idle, &ac);
#endif
	/*
	 * We're having a chicken and egg problem, even though we are
kernel/sched/deadline.c: +3 −4
@@ -2485,8 +2485,7 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
}

static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask,
-				u32 flags)
+				struct affinity_context *ctx)
{
	struct root_domain *src_rd;
	struct rq *rq;
@@ -2501,7 +2500,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
	 * update. We already made space for us in the destination
	 * domain (see cpuset_can_attach()).
	 */
-	if (!cpumask_intersects(src_rd->span, new_mask)) {
+	if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
		struct dl_bw *src_dl_b;

		src_dl_b = dl_bw_of(cpu_of(rq));
@@ -2515,7 +2514,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
		raw_spin_unlock(&src_dl_b->lock);
	}

-	set_cpus_allowed_common(p, new_mask, flags);
+	set_cpus_allowed_common(p, ctx);
}

/* Assumes rq->lock is held */
kernel/sched/sched.h: +7 −4
@@ -2145,6 +2145,11 @@ extern const u32 sched_prio_to_wmult[40];

#define RETRY_TASK		((void *)-1UL)

+struct affinity_context {
+	const struct cpumask *new_mask;
+	unsigned int flags;
+};
+
struct sched_class {

#ifdef CONFIG_UCLAMP_TASK
@@ -2173,9 +2178,7 @@ struct sched_class {

	void (*task_woken)(struct rq *this_rq, struct task_struct *task);

-	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask,
-				 u32 flags);
+	void (*set_cpus_allowed)(struct task_struct *p, struct affinity_context *ctx);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
@@ -2286,7 +2289,7 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);

extern void trigger_load_balance(struct rq *rq);

-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+extern void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx);

static inline struct task_struct *get_push_task(struct rq *rq)
{