Commit 966350ea authored by Dietmar Eggemann, committed by sanglipeng
Browse files

cgroup/cpuset: Free DL BW in case can_attach() fails

stable inclusion
from stable-v5.10.193
commit 2d69f68ad409a4945d1c5998e537082abf89096c
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I9399M

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=2d69f68ad409a4945d1c5998e537082abf89096c



--------------------------------

commit 2ef269ef upstream.

cpuset_can_attach() can fail. Postpone DL BW allocation until all tasks
have been checked. DL BW is not allocated per-task but as a sum over
all DL tasks migrating.

If multiple controllers are attached to the cgroup next to the cpuset
controller a non-cpuset can_attach() can fail. In this case free DL BW
in cpuset_cancel_attach().

Finally, update cpuset DL task count (nr_deadline_tasks) only in
cpuset_attach().

Suggested-by: Waiman Long <longman@redhat.com>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
Reviewed-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
[ Fix conflicts in kernel/cgroup/cpuset.c due to new code being applied
  that is not applicable on this branch. Reject new code. ]
Signed-off-by: Qais Yousef (Google) <qyousef@layalina.io>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: sanglipeng <sanglipeng1@jd.com>

Conflicts:
	kernel/cgroup/cpuset.c
parent 8c369adc
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1765,7 +1765,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
}

extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+47 −4
Original line number Diff line number Diff line
@@ -171,6 +171,8 @@ struct cpuset {
	 * know when to rebuild associated root domain bandwidth information.
	 */
	int nr_deadline_tasks;
	int nr_migrate_dl_tasks;
	u64 sum_migrate_dl_bw;

	KABI_RESERVE(1)
	KABI_RESERVE(2)
@@ -2276,16 +2278,23 @@ static int fmeter_getrate(struct fmeter *fmp)

static struct cpuset *cpuset_attach_old_cs;

/*
 * Reset the deadline-task migration state accumulated on @cs by
 * cpuset_can_attach(): the count of migrating DL tasks and their
 * summed bandwidth. Called when an attach completes, fails, or is
 * cancelled, so stale totals never leak into a later attach attempt.
 */
static void reset_migrate_dl_data(struct cpuset *cs)
{
	cs->nr_migrate_dl_tasks = 0;
	cs->sum_migrate_dl_bw = 0;
}

/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
static int cpuset_can_attach(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct cpuset *cs;
	struct cpuset *cs, *oldcs;
	struct task_struct *task;
	int ret;

	/* used later by cpuset_attach() */
	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
	oldcs = cpuset_attach_old_cs;
	cs = css_cs(css);

	mutex_lock(&cpuset_mutex);
@@ -2297,7 +2306,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
		goto out_unlock;

	cgroup_taskset_for_each(task, css, tset) {
		ret = task_can_attach(task, cs->effective_cpus);
		ret = task_can_attach(task);
		if (ret)
			goto out_unlock;
		ret = security_task_setscheduler(task);
@@ -2305,11 +2314,31 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
			goto out_unlock;

		if (dl_task(task)) {
			cs->nr_deadline_tasks++;
			cpuset_attach_old_cs->nr_deadline_tasks--;
			cs->nr_migrate_dl_tasks++;
			cs->sum_migrate_dl_bw += task->dl.dl_bw;
		}
	}

	if (!cs->nr_migrate_dl_tasks)
		goto out_success;

	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);

		if (unlikely(cpu >= nr_cpu_ids)) {
			reset_migrate_dl_data(cs);
			ret = -EINVAL;
			goto out_unlock;
		}

		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
		if (ret) {
			reset_migrate_dl_data(cs);
			goto out_unlock;
		}
	}

out_success:
	/*
	 * Mark attach is in progress.  This makes validate_change() fail
	 * changes which zero cpus/mems_allowed.
@@ -2333,6 +2362,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset)
	cs->attach_in_progress--;
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);

	if (cs->nr_migrate_dl_tasks) {
		int cpu = cpumask_any(cs->effective_cpus);

		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
		reset_migrate_dl_data(cs);
	}

	mutex_unlock(&cpuset_mutex);
}

@@ -2414,6 +2451,12 @@ static void cpuset_attach(struct cgroup_taskset *tset)

	cs->old_mems_allowed = cpuset_attach_nodemask_to;

	if (cs->nr_migrate_dl_tasks) {
		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
		reset_migrate_dl_data(cs);
	}

	cs->attach_in_progress--;
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);
+2 −15
Original line number Diff line number Diff line
@@ -7755,8 +7755,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
	return ret;
}

int task_can_attach(struct task_struct *p,
		    const struct cpumask *cs_effective_cpus)
int task_can_attach(struct task_struct *p)
{
	int ret = 0;

@@ -7769,21 +7768,9 @@ int task_can_attach(struct task_struct *p,
	 * success of set_cpus_allowed_ptr() on all attached tasks
	 * before cpus_mask may be changed.
	 */
	if (p->flags & PF_NO_SETAFFINITY) {
	if (p->flags & PF_NO_SETAFFINITY)
		ret = -EINVAL;
		goto out;
	}

	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
					      cs_effective_cpus)) {
		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);

		if (unlikely(cpu >= nr_cpu_ids))
			return -EINVAL;
		ret = dl_bw_alloc(cpu, p->dl.dl_bw);
	}

out:
	return ret;
}