Unverified Commit f0e063d2 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!11421 Some fixes about cpuset partition

Merge Pull Request from: @ci-robot 
 
PR sync from: Chen Ridong <chenridong@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/K5FP3ZRUQS5CY7CWWLYXOSNUPOOUG2EA/ 
Chen Ridong (1):
  cgroup/cpuset: fix panic caused by partcmd_update

Waiman Long (3):
  cgroup/cpuset: Optimize isolated partition only
    generate_sched_domains() calls
  cgroup/cpuset: Fix remote root partition creation problem
  cgroup/cpuset: Clear effective_xcpus on cpus_allowed clearing only if
    cpus.exclusive not set


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/IAOPW2 
 
Link: https://gitee.com/openeuler/kernel/pulls/11421

 

Reviewed-by: default avatarZhang Peng <zhangpeng362@huawei.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents 37e8f067 aa72549b
Loading
Loading
Loading
Loading
+54 −14
Original line number Diff line number Diff line
@@ -180,7 +180,7 @@ struct cpuset {
	/* for custom sched domain */
	int relax_domain_level;

	/* number of valid sub-partitions */
	/* number of valid local child partitions */
	int nr_subparts;

	/* partition root state */
@@ -1114,13 +1114,15 @@ static int generate_sched_domains(cpumask_var_t **domains,
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);
	bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts) {
	if (root_load_balance && cpumask_empty(subpartitions_cpus)) {
single_root_domain:
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
@@ -1148,16 +1150,18 @@ static int generate_sched_domains(cpumask_var_t **domains,
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;

		if (cgrpv2)
			goto v2;

		/*
		 * v1:
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
@@ -1165,20 +1169,39 @@ static int generate_sched_domains(cpumask_var_t **domains,
					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree */
		pos_css = css_rightmost_descendant(pos_css);
		continue;

		if (is_sched_load_balance(cp) &&
v2:
		/*
		 * Only valid partition roots that are not isolated and with
		 * non-empty effective_cpus will be saved into csn[].
		 */
		if ((cp->partition_root_state == PRS_ROOT) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_valid(cp))
		/*
		 * Skip @cp's subtree if not a partition root and has no
		 * exclusive CPUs to be granted to child cpusets.
		 */
		if (!is_partition_valid(cp) && cpumask_empty(cp->exclusive_cpus))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	/*
	 * If there are only isolated partitions underneath the cgroup root,
	 * we can optimize out unneeded sched domains scanning.
	 */
	if (root_load_balance && (csn == 1))
		goto single_root_domain;

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;
@@ -1221,6 +1244,20 @@ static int generate_sched_domains(cpumask_var_t **domains,
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	/*
	 * Cgroup v2 doesn't support domain attributes, just set all of them
	 * to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
	 * subset of HK_TYPE_DOMAIN housekeeping CPUs.
	 */
	if (cgrpv2) {
		for (i = 0; i < ndoms; i++) {
			cpumask_copy(doms[i], csa[i]->effective_cpus);
			if (dattr)
				dattr[i] = SD_ATTR_INIT;
		}
		goto done;
	}

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
@@ -1380,7 +1417,7 @@ static void rebuild_sched_domains_locked(void)
	 * root should be only a subset of the active CPUs.  Since a CPU in any
	 * partition root could be offlined, all must be checked.
	 */
	if (top_cpuset.nr_subparts) {
	if (!cpumask_empty(subpartitions_cpus)) {
		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (!is_partition_valid(cs)) {
@@ -1954,6 +1991,8 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
			part_error = PERR_CPUSEMPTY;
			goto write_error;
		}
		/* Check newmask again, whether cpus are available for parent/cs */
		nocpu |= tasks_nocpu_error(parent, cs, newmask);

		/*
		 * partcmd_update with newmask:
@@ -2496,6 +2535,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
		if (cpumask_empty(trialcs->exclusive_cpus))
			cpumask_clear(trialcs->effective_xcpus);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
@@ -4671,7 +4711,7 @@ static void cpuset_handle_hotplug(void)
	 * In the rare case that hotplug removes all the cpus in
	 * subpartitions_cpus, we assumed that cpus are updated.
	 */
	if (!cpus_updated && top_cpuset.nr_subparts)
	if (!cpus_updated && !cpumask_empty(subpartitions_cpus))
		cpus_updated = true;

	/* For v1, synchronize cpus_allowed to cpu_active_mask */