Commit ceff622c authored by Barry Song's avatar Barry Song Committed by Jie Liu
Browse files

sched: Add cpus_share_resources API

mainline inclusion
from mainline-v6.7-rc1
commit b95303e0aeaf446b65169dd4142cacdaeb7d4c8b
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8E8NN
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=b95303e0aeaf446b65169dd4142cacdaeb7d4c8b



----------------------------------------------------------------------

Add cpus_share_resources() API. This is the preparation for the
optimization of select_idle_cpu() on platforms with cluster scheduler
level.

On a machine with clusters cpus_share_resources() will test whether
two cpus are within the same cluster. On a non-cluster machine it
will behave the same as cpus_share_cache(). So we use "resources"
here for cache resources.

Signed-off-by: default avatarBarry Song <song.bao.hua@hisilicon.com>
Signed-off-by: default avatarYicong Yang <yangyicong@hisilicon.com>
Signed-off-by: default avatarPeter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: default avatarGautham R. Shenoy <gautham.shenoy@amd.com>
Reviewed-by: default avatarTim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: default avatarVincent Guittot <vincent.guittot@linaro.org>
Tested-and-reviewed-by: default avatarChen Yu <yu.c.chen@intel.com>
Tested-by: default avatarK Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lkml.kernel.org/r/20231019033323.54147-2-yangyicong@huawei.com


Signed-off-by: default avatarJie Liu <liujie375@h-partners.com>
parent 727ea34d
Loading
Loading
Loading
Loading
+7 −0
Original line number Original line Diff line number Diff line
@@ -109,6 +109,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
 */
 */
SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)


/*
 * Domain members share CPU cluster (LLC tags or L2 cache)
 *
 * NEEDS_GROUPS: Clusters are shared between groups.
 */
SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)

/*
/*
 * Domain members share CPU package resources (i.e. caches)
 * Domain members share CPU package resources (i.e. caches)
 *
 *
+7 −1
Original line number Original line Diff line number Diff line
@@ -46,7 +46,7 @@ static inline int cpu_smt_flags(void)
#ifdef CONFIG_SCHED_CLUSTER
#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
static inline int cpu_cluster_flags(void)
{
{
	return SD_SHARE_PKG_RESOURCES;
	return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
}
}
#endif
#endif


@@ -189,6 +189,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);


bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);


typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
typedef int (*sched_domain_flags_f)(void);
@@ -243,6 +244,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
	return true;
	return true;
}
}


static inline bool cpus_share_resources(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */
#endif	/* !CONFIG_SMP */


#ifndef arch_scale_cpu_capacity
#ifndef arch_scale_cpu_capacity
+12 −0
Original line number Original line Diff line number Diff line
@@ -3021,6 +3021,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}
}


/*
 * Whether CPUs share cache resources, which means the LLC on non-cluster
 * machines and the LLC tag or L2 on machines with clusters.
 */
bool cpus_share_resources(int this_cpu, int that_cpu)
{
	if (this_cpu == that_cpu)
		return true;

	return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu);
}

static inline bool ttwu_queue_cond(int cpu)
static inline bool ttwu_queue_cond(int cpu)
{
{
	/*
	/*
+1 −0
Original line number Original line Diff line number Diff line
@@ -1844,6 +1844,7 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(int, sd_share_id);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
+13 −0
Original line number Original line Diff line number Diff line
@@ -647,6 +647,7 @@ static void destroy_sched_domains(struct sched_domain *sd)
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(int, sd_share_id);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
@@ -682,6 +683,17 @@ static void update_top_cache_domain(int cpu)
	per_cpu(sd_llc_id, cpu) = id;
	per_cpu(sd_llc_id, cpu) = id;
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
	rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);


	sd = lowest_flag_domain(cpu, SD_CLUSTER);
	if (sd)
		id = cpumask_first(sched_domain_span(sd));

	/*
	 * This assignment should be placed after the sd_llc_id as
	 * we want this id to equal the cluster id on cluster machines
	 * but the LLC id on non-cluster machines.
	 */
	per_cpu(sd_share_id, cpu) = id;

	sd = lowest_flag_domain(cpu, SD_NUMA);
	sd = lowest_flag_domain(cpu, SD_NUMA);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
	rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);


@@ -1535,6 +1547,7 @@ int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
 */
 */
#define TOPOLOGY_SD_FLAGS		\
#define TOPOLOGY_SD_FLAGS		\
	(SD_SHARE_CPUCAPACITY	|	\
	(SD_SHARE_CPUCAPACITY	|	\
	 SD_CLUSTER		|	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_SHARE_PKG_RESOURCES |	\
	 SD_NUMA		|	\
	 SD_NUMA		|	\
	 SD_ASYM_PACKING)
	 SD_ASYM_PACKING)