Commit 72730bfc authored by Srikar Dronamraju, committed by Michael Ellerman

powerpc/smp: Create coregroup domain

Add percpu coregroup maps and masks to create the coregroup domain.
If a coregroup doesn't exist, the coregroup domain will be degenerated
in favour of the SMT/CACHE domain. Note that this patch only creates a
stub for cpu_to_coregroup_id; the actual implementation follows in a
subsequent patch.

Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200810071834.92514-10-srikar@linux.vnet.ibm.com
parent 6e086302
arch/powerpc/include/asm/topology.h +10 −0
@@ -88,12 +88,22 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)

#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int find_and_online_cpu_nid(int cpu);
extern int cpu_to_coregroup_id(int cpu);
#else
static inline int find_and_online_cpu_nid(int cpu)
{
	return 0;
}

static inline int cpu_to_coregroup_id(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_to_core_id(cpu);
#else
	return 0;
#endif
}

#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */

#include <asm-generic/topology.h>
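
For illustration, a minimal user-space sketch of the fallback stub above, assuming a hypothetical cpu_to_core_id() with four threads per core (the kernel's real helper is not reproduced here):

#include <stdio.h>

#define CONFIG_SMP	/* undefine to exercise the UP branch */

/* Hypothetical stand-in for the kernel's cpu_to_core_id():
 * assume four hardware threads per core. */
static int cpu_to_core_id(int cpu)
{
	return cpu / 4;
}

/* Mirrors the fallback stub above: with SMP the coregroup id
 * falls back to the core id; on UP there is only group 0. */
static int cpu_to_coregroup_id(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_to_core_id(cpu);
#else
	return 0;
#endif
}

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> coregroup %d\n", cpu, cpu_to_coregroup_id(cpu));
	return 0;
}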
arch/powerpc/kernel/smp.c +53 −1
@@ -81,12 +81,22 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1   1
struct thread_groups {
@@ -862,11 +872,27 @@ static const struct cpumask *smallcore_smt_mask(int cpu)
}
#endif

static struct cpumask *cpu_coregroup_mask(int cpu)
{
	return per_cpu(cpu_coregroup_map, cpu);
}

static bool has_coregroup_support(void)
{
	return coregroup_enabled;
}

static const struct cpumask *cpu_mc_mask(int cpu)
{
	return cpu_coregroup_mask(cpu);
}

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_mc_mask, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
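
The enum above exists so fixup_topology() can index powerpc_topology[] by name rather than by magic number; because the smt_idx enumerator and the SMT array entry sit under the same #ifdef, the named indices stay in step with the array slots. A minimal sketch of that invariant, using a simplified stand-in type rather than the real struct sched_domain_topology_level:

#include <assert.h>
#include <stddef.h>

#define CONFIG_SCHED_SMT	/* toggle to see both layouts */

/* Simplified stand-in for struct sched_domain_topology_level. */
struct level { const char *name; };

enum {
#ifdef CONFIG_SCHED_SMT
	smt_idx,
#endif
	cache_idx,
	mc_idx,
	die_idx,
};

static struct level topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ "SMT" },
#endif
	{ "CACHE" },
	{ "MC" },
	{ "DIE" },
	{ NULL },
};

int main(void)
{
	/* The named indices track the array whether or not SMT is built in. */
	assert(topology[cache_idx].name);
	assert(topology[mc_idx].name);
	assert(topology[die_idx].name);
	return 0;
}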
@@ -913,6 +939,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		if (has_coregroup_support())
			zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
						GFP_KERNEL, cpu_to_node(cpu));

#ifdef CONFIG_NEED_MULTIPLE_NODES
		/*
		 * numa_node_id() works after this.
@@ -930,6 +960,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (has_coregroup_support())
		cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
@@ -1234,6 +1267,8 @@ static void remove_cpu_from_masks(int cpu)
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
		if (has_coregroup_support())
			set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
	}
}
#endif
@@ -1294,6 +1329,20 @@ static void add_cpu_to_masks(int cpu)
	add_cpu_to_smallcore_masks(cpu);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	if (has_coregroup_support()) {
		int coregroup_id = cpu_to_coregroup_id(cpu);

		cpumask_set_cpu(cpu, cpu_coregroup_mask(cpu));
		for_each_cpu_and(i, cpu_online_mask, cpu_cpu_mask(cpu)) {
			int fcpu = cpu_first_thread_sibling(i);

			if (fcpu == first_thread)
				set_cpus_related(cpu, i, cpu_coregroup_mask);
			else if (coregroup_id == cpu_to_coregroup_id(i))
				set_cpus_related(cpu, i, cpu_coregroup_mask);
		}
	}

	if (pkg_id == -1) {
		struct cpumask *(*mask)(int) = cpu_sibling_mask;

@@ -1388,9 +1437,12 @@ static void fixup_topology(void)
#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
-		powerpc_topology[0].mask = smallcore_smt_mask;
+		powerpc_topology[smt_idx].mask = smallcore_smt_mask;
	}
#endif

	if (!has_coregroup_support())
		powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
}

void __init smp_cpus_done(unsigned int max_cpus)
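
The !has_coregroup_support() fallback above is what lets the new MC level degenerate harmlessly: MC is given the same mask function as CACHE, and the scheduler collapses a domain whose cpumask duplicates the level below it. A minimal sketch of that aliasing, with simplified types in place of the kernel's:

#include <assert.h>

typedef const unsigned int *(*mask_fn)(int cpu);

struct level { mask_fn mask; const char *name; };

static const unsigned int cache_cpus[] = { 0x0f, 0x0f, 0x0f, 0x0f };

static const unsigned int *shared_cache_mask(int cpu)
{
	return &cache_cpus[cpu];
}

static const unsigned int *cpu_mc_mask(int cpu)
{
	return &cache_cpus[cpu];	/* pretend coregroup == cache domain */
}

static struct level topology[] = {
	{ shared_cache_mask, "CACHE" },
	{ cpu_mc_mask, "MC" },
};

int main(void)
{
	int has_coregroup_support = 0;	/* platform without coregroups */

	if (!has_coregroup_support)
		topology[1].mask = topology[0].mask;

	/* MC now yields the same span as CACHE for every CPU, so the
	 * scheduler would collapse (degenerate) the duplicate level. */
	assert(topology[1].mask(0) == topology[0].mask(0));
	return 0;
}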
arch/powerpc/mm/numa.c +5 −0
@@ -1245,6 +1245,11 @@ int find_and_online_cpu_nid(int cpu)
	return new_nid;
}

int cpu_to_coregroup_id(int cpu)
{
	return cpu_to_core_id(cpu);
}

static int topology_update_init(void)
{
	topology_inited = 1;
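
As the commit message notes, cpu_to_coregroup_id() above is only a stub; until the real mapping arrives in a subsequent patch, each core is effectively its own coregroup. An illustrative check, reusing the hypothetical four-threads-per-core cpu_to_core_id() from the earlier sketch:

#include <assert.h>

/* Hypothetical: four threads per core, as in the earlier sketch. */
static int cpu_to_core_id(int cpu)
{
	return cpu / 4;
}

/* The stub above: coregroup id == core id. */
static int cpu_to_coregroup_id(int cpu)
{
	return cpu_to_core_id(cpu);
}

int main(void)
{
	/* Threads of one core share a coregroup id ... */
	assert(cpu_to_coregroup_id(0) == cpu_to_coregroup_id(3));
	/* ... and different cores get different ids. */
	assert(cpu_to_coregroup_id(0) != cpu_to_coregroup_id(4));
	return 0;
}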