Commit 8f9ea86f authored by Waiman Long, committed by Peter Zijlstra

sched: Always preserve the user requested cpumask



Unconditionally preserve the user-requested cpumask on
sched_setaffinity() calls. This allows using it outside of the fairly
narrow restrict_cpus_allowed_ptr() use case and fixes some cpuset
issues where the user-requested cpumask currently gets destroyed.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220922180041.1768141-3-longman@redhat.com
parent 713a2e21
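
Note (illustration, not part of the patch): the "user requested cpumask" is simply the mask a task passes to sched_setaffinity(2), which the kernel now remembers in p->user_cpus_ptr even when cpusets or CPU hotplug later narrow the effective affinity. A minimal userspace sketch of that contract; the CPU numbers are assumptions:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t requested, effective;

	CPU_ZERO(&requested);
	CPU_SET(0, &requested);
	CPU_SET(1, &requested);		/* assumes CPUs 0 and 1 exist */

	/* This mask is the "user requested cpumask" the patch preserves. */
	if (sched_setaffinity(0, sizeof(requested), &requested))
		perror("sched_setaffinity");

	/* The effective mask may be narrower (cpusets, hotplug), but the
	 * requested one is no longer lost inside the kernel. */
	if (sched_getaffinity(0, sizeof(effective), &effective) == 0)
		printf("effective CPUs: %d\n", CPU_COUNT(&effective));
	return 0;
}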
kernel/sched/core.c +64 −55
@@ -2540,6 +2540,12 @@ void set_cpus_allowed_common(struct task_struct *p, struct affinity_context *ctx
 
 	cpumask_copy(&p->cpus_mask, ctx->new_mask);
 	p->nr_cpus_allowed = cpumask_weight(ctx->new_mask);
+
+	/*
+	 * Swap in a new user_cpus_ptr if SCA_USER flag set
+	 */
+	if (ctx->flags & SCA_USER)
+		swap(p->user_cpus_ptr, ctx->user_mask);
 }
 
 static void
@@ -2600,6 +2606,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 		      int node)
 {
+	unsigned long flags;
+
 	if (!src->user_cpus_ptr)
 		return 0;

@@ -2607,7 +2615,10 @@ int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
 	if (!dst->user_cpus_ptr)
 		return -ENOMEM;
 
+	/* Use pi_lock to protect content of user_cpus_ptr */
+	raw_spin_lock_irqsave(&src->pi_lock, flags);
 	cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+	raw_spin_unlock_irqrestore(&src->pi_lock, flags);
 	return 0;
 }
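
Note: the new pi_lock section is needed because a concurrent sched_setaffinity() can now swap the source task's user_cpus_ptr via set_cpus_allowed_common() above, so the copy during fork must happen under the same lock as the swap. A userspace analogue of the pattern, with hypothetical names (task_like, dup_user_mask) standing in for the kernel structures:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct task_like {
	pthread_mutex_t lock;		/* plays the role of p->pi_lock */
	cpu_set_t *user_mask;		/* plays the role of p->user_cpus_ptr */
};

/* Give dst its own copy: the thread owning src may swap in a new mask
 * (under the same lock) at any moment, so the copy itself is locked. */
int dup_user_mask(struct task_like *dst, struct task_like *src)
{
	if (!src->user_mask)
		return 0;

	dst->user_mask = malloc(sizeof(cpu_set_t));
	if (!dst->user_mask)
		return -1;

	pthread_mutex_lock(&src->lock);
	*dst->user_mask = *src->user_mask;
	pthread_mutex_unlock(&src->lock);
	return 0;
}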

@@ -2856,7 +2867,6 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 	const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	bool kthread = p->flags & PF_KTHREAD;
-	struct cpumask *user_mask = NULL;
 	unsigned int dest_cpu;
 	int ret = 0;
@@ -2915,14 +2925,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
 
 	__do_set_cpus_allowed(p, ctx);
 
-	if (ctx->flags & SCA_USER)
-		user_mask = clear_user_cpus_ptr(p);
-
-	ret = affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
-
-	kfree(user_mask);
-
-	return ret;
+	return affine_move_task(rq, p, rf, dest_cpu, ctx->flags);
 
 out:
 	task_rq_unlock(rq, p, rf);
@@ -2962,8 +2965,10 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
 /*
  * Change a given task's CPU affinity to the intersection of its current
- * affinity mask and @subset_mask, writing the resulting mask to @new_mask
- * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask.
+ * If user_cpus_ptr is defined, use it as the basis for restricting CPU
+ * affinity or use cpu_online_mask instead.
  *
  * If the resulting mask is empty, leave the affinity unchanged and return
  * -EINVAL.
  */
@@ -2971,18 +2976,14 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 				     struct cpumask *new_mask,
 				     const struct cpumask *subset_mask)
 {
-	struct cpumask *user_mask = NULL;
-	struct affinity_context ac;
+	struct affinity_context ac = {
+		.new_mask  = new_mask,
+		.flags     = 0,
+	};
 	struct rq_flags rf;
 	struct rq *rq;
 	int err;
 
-	if (!p->user_cpus_ptr) {
-		user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-		if (!user_mask)
-			return -ENOMEM;
-	}
-
 	rq = task_rq_lock(p, &rf);
 
 	/*
@@ -2995,29 +2996,15 @@ static int restrict_cpus_allowed_ptr(struct task_struct *p,
 		goto err_unlock;
 	}
 
-	if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+	if (!cpumask_and(new_mask, task_user_cpus(p), subset_mask)) {
 		err = -EINVAL;
 		goto err_unlock;
 	}
 
-	/*
-	 * We're about to butcher the task affinity, so keep track of what
-	 * the user asked for in case we're able to restore it later on.
-	 */
-	if (user_mask) {
-		cpumask_copy(user_mask, p->cpus_ptr);
-		p->user_cpus_ptr = user_mask;
-	}
-
-	ac = (struct affinity_context){
-		.new_mask = new_mask,
-	};
-
 	return __set_cpus_allowed_ptr_locked(p, &ac, rq, &rf);
 
 err_unlock:
 	task_rq_unlock(rq, p, &rf);
-	kfree(user_mask);
 	return err;
 }
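
Note: the behavioural change here is the source operand of the intersection. The restriction is now computed against task_user_cpus(p), i.e. the saved user-requested mask or a fully-set default, rather than the possibly already-narrowed p->cpus_mask. A sketch of the mask arithmetic using glibc's CPU_* macros as stand-ins for the kernel cpumask helpers (names hypothetical):

#define _GNU_SOURCE
#include <sched.h>

/* "user" plays the role of task_user_cpus(p), "subset" of @subset_mask;
 * returns -1 where the kernel returns -EINVAL. */
int restrict_mask(cpu_set_t *new, cpu_set_t *user, cpu_set_t *subset)
{
	CPU_AND(new, user, subset);
	if (CPU_COUNT(new) == 0)
		return -1;	/* empty intersection: leave affinity unchanged */
	return 0;
}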

@@ -3071,33 +3058,25 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx);
 
 /*
  * Restore the affinity of a task @p which was previously restricted by a
- * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
- * @p->user_cpus_ptr.
+ * call to force_compatible_cpus_allowed_ptr().
  *
  * It is the caller's responsibility to serialise this with any calls to
  * force_compatible_cpus_allowed_ptr(@p).
  */
 void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
 {
-	struct cpumask *user_mask = p->user_cpus_ptr;
 	struct affinity_context ac = {
-		.new_mask  = user_mask,
+		.new_mask  = task_user_cpus(p),
 		.flags     = 0,
 	};
-	unsigned long flags;
+	int ret;
 
 	/*
-	 * Try to restore the old affinity mask. If this fails, then
-	 * we free the mask explicitly to avoid it being inherited across
-	 * a subsequent fork().
+	 * Try to restore the old affinity mask with __sched_setaffinity().
+	 * Cpuset masking will be done there too.
 	 */
-	if (!user_mask || !__sched_setaffinity(p, &ac))
-		return;
-
-	raw_spin_lock_irqsave(&p->pi_lock, flags);
-	user_mask = clear_user_cpus_ptr(p);
-	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-
-	kfree(user_mask);
+	ret = __sched_setaffinity(p, &ac);
+	WARN_ON_ONCE(ret);
 }
 
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
@@ -8136,7 +8115,7 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 	retval = dl_task_check_affinity(p, new_mask);
 	if (retval)
 		goto out_free_new_mask;
-again:
+
 	retval = __set_cpus_allowed_ptr(p, ctx);
 	if (retval)
 		goto out_free_new_mask;
@@ -8148,7 +8127,24 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 		 * Just reset the cpumask to the cpuset's cpus_allowed.
 		 */
 		cpumask_copy(new_mask, cpus_allowed);
-		goto again;
+
+		/*
+		 * If SCA_USER is set, a 2nd call to __set_cpus_allowed_ptr()
+		 * will restore the previous user_cpus_ptr value.
+		 *
+		 * In the unlikely event a previous user_cpus_ptr exists,
+		 * we need to further restrict the mask to what is allowed
+		 * by that old user_cpus_ptr.
+		 */
+		if (unlikely((ctx->flags & SCA_USER) && ctx->user_mask)) {
+			bool empty = !cpumask_and(new_mask, new_mask,
+						  ctx->user_mask);
+
+			if (WARN_ON_ONCE(empty))
+				cpumask_copy(new_mask, cpus_allowed);
+		}
+		__set_cpus_allowed_ptr(p, ctx);
+		retval = -EINVAL;
 	}
 
 out_free_new_mask:
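
Note: the unbounded "goto again" retry is replaced by one corrective second call. A userspace sketch of the fallback-mask computation above, with hypothetical names; the fprintf branch mirrors the WARN_ON_ONCE(empty) fallback:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Start from the cpuset's allowed CPUs; if an older user-requested mask
 * exists, narrow to it, falling back to the cpuset mask alone when the
 * intersection is empty. */
void compute_retry_mask(cpu_set_t *new_mask, const cpu_set_t *cpus_allowed,
			cpu_set_t *old_user_mask /* may be NULL */)
{
	*new_mask = *cpus_allowed;
	if (old_user_mask) {
		CPU_AND(new_mask, new_mask, old_user_mask);
		if (CPU_COUNT(new_mask) == 0) {
			fprintf(stderr, "empty intersection, keeping cpuset mask\n");
			*new_mask = *cpus_allowed;
		}
	}
}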
@@ -8160,9 +8156,8 @@ __sched_setaffinity(struct task_struct *p, struct affinity_context *ctx)
 
 long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 {
-	struct affinity_context ac = {
-		.new_mask = in_mask,
-	};
+	struct affinity_context ac;
+	struct cpumask *user_mask;
 	struct task_struct *p;
 	int retval;

@@ -8197,7 +8192,21 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+	if (!user_mask) {
+		retval = -ENOMEM;
+		goto out_put_task;
+	}
+	cpumask_copy(user_mask, in_mask);
+	ac = (struct affinity_context){
+		.new_mask  = in_mask,
+		.user_mask = user_mask,
+		.flags     = SCA_USER,
+	};
+
 	retval = __sched_setaffinity(p, &ac);
+	kfree(ac.user_mask);
 
 out_put_task:
 	put_task_struct(p);
 	return retval;
kernel/sched/sched.h +8 −0
@@ -1878,6 +1878,13 @@ static inline void dirty_sched_domain_sysctl(int cpu)
 #endif
 
 extern int sched_update_scaling(void);
+
+static inline const struct cpumask *task_user_cpus(struct task_struct *p)
+{
+	if (!p->user_cpus_ptr)
+		return cpu_possible_mask; /* &init_task.cpus_mask */
+	return p->user_cpus_ptr;
+}
 #endif /* CONFIG_SMP */
 
 #include "stats.h"
@@ -2147,6 +2154,7 @@ extern const u32 sched_prio_to_wmult[40];
 
 struct affinity_context {
 	const struct cpumask *new_mask;
+	struct cpumask *user_mask;
 	unsigned int flags;
 };
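
Note: taken together, the new user_mask field and the swap() in set_cpus_allowed_common() form an ownership handshake: the caller allocates a copy of the requested mask, the setter swaps it into the task, and the caller frees whatever pointer comes back out. A compact userspace model of that protocol; all names and the SCA_USER value are illustrative, not the kernel's:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdlib.h>

#define SCA_USER 0x1			/* illustrative value only */

struct affinity_context_model {		/* mirrors struct affinity_context */
	const cpu_set_t *new_mask;
	cpu_set_t *user_mask;
	unsigned int flags;
};

struct task_model {			/* hypothetical slice of task_struct */
	cpu_set_t cpus_mask;
	cpu_set_t *user_cpus_ptr;
};

/* Mirrors set_cpus_allowed_common(): the swap hands the task's old user
 * mask back to the caller through ctx->user_mask. */
void set_cpus_allowed_model(struct task_model *p,
			    struct affinity_context_model *ctx)
{
	p->cpus_mask = *ctx->new_mask;
	if (ctx->flags & SCA_USER) {
		cpu_set_t *old = p->user_cpus_ptr;

		p->user_cpus_ptr = ctx->user_mask;
		ctx->user_mask = old;	/* caller frees this, old or new */
	}
}

/* Mirrors sched_setaffinity(): allocate the copy up front, free whatever
 * pointer the swap handed back. */
int setaffinity_model(struct task_model *p, const cpu_set_t *in_mask)
{
	cpu_set_t *user_mask = malloc(sizeof(*user_mask));

	if (!user_mask)
		return -ENOMEM;
	*user_mask = *in_mask;

	struct affinity_context_model ac = {
		.new_mask  = in_mask,
		.user_mask = user_mask,
		.flags     = SCA_USER,
	};

	set_cpus_allowed_model(p, &ac);
	free(ac.user_mask);	/* exactly one free on every path */
	return 0;
}

Because the old pointer rides back out through ctx->user_mask, there is exactly one free on every path, which is what lets the __set_cpus_allowed_ptr_locked() hunk above drop its clear_user_cpus_ptr()/kfree() sequence.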