Commit 79bec4c6 authored by Steve Sistare, committed by Zheng Zengkai
Browse files

sched/topology: Provide hooks to allocate data shared per LLC

hulk inclusion
category: feature
bugzilla: 38261, https://gitee.com/openeuler/kernel/issues/I49XPZ


CVE: NA

---------------------------

Add functions sd_llc_alloc_all() and sd_llc_free_all() to allocate and
free data pointed to by struct sched_domain_shared at the last-level-cache
domain.  sd_llc_alloc_all() is called after the SD hierarchy is known, to
eliminate the unnecessary allocations that would occur if we instead
allocated in __sdt_alloc() and then figured out which shared nodes are
redundant.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Reviewed-by: Hanjun Guo <guohanjun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Reviewed-by: Chen Hui <judy.chenhui@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent bf693f72
Diff stat: +74 −1
@@ -10,6 +10,12 @@ DEFINE_MUTEX(sched_domains_mutex);
static cpumask_var_t sched_domains_tmpmask;
static cpumask_var_t sched_domains_tmpmask2;

/*
 * Hooks to allocate/free data hung off struct sched_domain_shared at the
 * last-level-cache domain; defined later in this file.  struct s_data is
 * only used through a pointer here, so a forward declaration suffices.
 */
struct s_data;
static int sd_llc_alloc(struct sched_domain *sd);
static void sd_llc_free(struct sched_domain *sd);
static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d);
static void sd_llc_free_all(const struct cpumask *cpu_map);

#ifdef CONFIG_SCHED_DEBUG

static int __init sched_debug_setup(char *str)
@@ -596,8 +602,10 @@ static void destroy_sched_domain(struct sched_domain *sd)
	 */
	free_sched_groups(sd->groups, 1);

	if (sd->shared && atomic_dec_and_test(&sd->shared->ref))
	if (sd->shared && atomic_dec_and_test(&sd->shared->ref)) {
		sd_llc_free(sd);
		kfree(sd->shared);
	}
	kfree(sd);
}

@@ -1238,6 +1246,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
		free_percpu(d->sd);
		fallthrough;
	case sa_sd_storage:
		sd_llc_free_all(cpu_map);
		__sdt_free(cpu_map);
		fallthrough;
	case sa_none:
@@ -1849,6 +1858,62 @@ static void __sdt_free(const struct cpumask *cpu_map)
	}
}

static int sd_llc_alloc(struct sched_domain *sd)
{
	/*
	 * Hook for attaching extra data to sd->shared at the LLC domain.
	 * Nothing is allocated yet, so this always succeeds.
	 */
	return 0;
}

static void sd_llc_free(struct sched_domain *sd)
{
	/*
	 * Hook for releasing extra data hung off sd->shared at the LLC
	 * domain.  Bail out early when there is no shared struct; nothing
	 * is allocated yet, so there is nothing further to release.
	 */
	if (!sd->shared)
		return;
}

static int sd_llc_alloc_all(const struct cpumask *cpu_map, struct s_data *d)
{
	int cpu;

	/*
	 * For every CPU, walk up from its base domain while
	 * SD_SHARE_PKG_RESOURCES is set; the last such domain is the highest
	 * level sharing a cache, i.e. the LLC.  Allocate the per-LLC shared
	 * data there.  Called after the SD hierarchy is built so redundant
	 * allocations are avoided.
	 */
	for_each_cpu(cpu, cpu_map) {
		struct sched_domain *sd = *per_cpu_ptr(d->sd, cpu);
		struct sched_domain *llc = NULL;

		while (sd && (sd->flags & SD_SHARE_PKG_RESOURCES)) {
			llc = sd;
			sd = sd->parent;
		}
		/* Nonzero return: caller unwinds via sd_llc_free_all(). */
		if (llc && sd_llc_alloc(llc))
			return 1;
	}

	return 0;
}

static void sd_llc_free_all(const struct cpumask *cpu_map)
{
	struct sched_domain_topology_level *tl;
	struct sched_domain *sd;
	struct sd_data *sdd;
	int j;

	/*
	 * Release per-LLC shared data for every domain in every topology
	 * level.  Runs in the sa_sd_storage error path before __sdt_free(),
	 * so a partially failed __sdt_alloc() may have left percpu pointers
	 * unallocated.
	 */
	for_each_sd_topology(tl) {
		sdd = &tl->data;
		/*
		 * &tl->data can never be NULL (address of a member), but
		 * sdd->sd can be if __sdt_alloc() failed partway through;
		 * guard it the same way __sdt_free() does before taking
		 * per_cpu_ptr() on it.
		 */
		if (!sdd->sd)
			continue;
		for_each_cpu(j, cpu_map) {
			sd = *per_cpu_ptr(sdd->sd, j);
			if (sd)
				sd_llc_free(sd);
		}
	}
}

static struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		const struct cpumask *cpu_map, struct sched_domain_attr *attr,
		struct sched_domain *child, int dflags, int cpu)
@@ -2049,6 +2114,14 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
		}
	}

	/*
	 * Allocate shared sd data at last level cache.  Must be done after
	 * domains are built above, but before the data is used in
	 * cpu_attach_domain and descendants below.
	 */
	if (sd_llc_alloc_all(cpu_map, &d))
		goto error;

	/* Attach the domains */
	rcu_read_lock();
	for_each_cpu(i, cpu_map) {