Unverified Commit c005c632 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3745 【OLK-6.6】Support SMT control on arm64

Merge Pull Request from: @liujie-248683921 
 
The core CPU hotplug framework supports runtime SMT control, which is not yet implemented on arm64. Besides the general vulnerability-mitigation concerns, we want this runtime control on our arm64 servers for:

- better single CPU performance in some cases
- saving overall power consumption

This patchset implements it in the following aspects:

- implements the basic support in arch_topology driver
- support retrieving the SMT thread number on OF-based systems
- support retrieving the SMT thread number on ACPI-based systems
- select HOTPLUG_SMT for arm64

https://gitee.com/openeuler/kernel/issues/I8TAPW

Tests have been done on a real ACPI-based arm64 server and on ACPI/OF-based QEMU VMs. 
 
Link: https://gitee.com/openeuler/kernel/pulls/3745

 

Reviewed-by: default avatarZhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: default avatarWeilong Chen <chenweilong@huawei.com>
Reviewed-by: default avatarLiu Chao <liuchao173@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 8b95abd8 0dc61bb6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -235,6 +235,7 @@ config ARM64
	select HAVE_KRETPROBES
	select HAVE_GENERIC_VDSO
	select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU
	select HOTPLUG_SMT if (SMP && HOTPLUG_CPU)
	select IRQ_DOMAIN
	select IRQ_FORCED_THREADING
	select KASAN_VMALLOC if KASAN
+1 −0
Original line number Diff line number Diff line
@@ -730,6 +730,7 @@ CONFIG_KVM=y
#
# General architecture-dependent options
#
CONFIG_HOTPLUG_SMT=y
CONFIG_HOTPLUG_CORE_SYNC=y
CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
CONFIG_KPROBES=y
+23 −0
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/xarray.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
@@ -43,11 +44,16 @@ static bool __init acpi_cpu_is_threaded(int cpu)
 */
int __init parse_acpi_topology(void)
{
	int thread_num, max_smt_thread_num = 1;
	struct xarray core_threads;
	int cpu, topology_id;
	void *entry;

	if (acpi_disabled)
		return 0;

	xa_init(&core_threads);

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
@@ -57,6 +63,20 @@ int __init parse_acpi_topology(void)
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id   = topology_id;

			entry = xa_load(&core_threads, topology_id);
			if (!entry) {
				xa_store(&core_threads, topology_id,
					 xa_mk_value(1), GFP_KERNEL);
			} else {
				thread_num = xa_to_value(entry);
				thread_num++;
				xa_store(&core_threads, topology_id,
					 xa_mk_value(thread_num), GFP_KERNEL);

				if (thread_num > max_smt_thread_num)
					max_smt_thread_num = thread_num;
			}
		} else {
			cpu_topology[cpu].thread_id  = -1;
			cpu_topology[cpu].core_id    = topology_id;
@@ -67,6 +87,9 @@ int __init parse_acpi_topology(void)
		cpu_topology[cpu].package_id = topology_id;
	}

	topology_smt_set_num_threads(max_smt_thread_num);

	xa_destroy(&core_threads);
	return 0;
}
#endif
+45 −0
Original line number Diff line number Diff line
@@ -540,6 +540,13 @@ static int __init parse_core(struct device_node *core, int package_id,
		i++;
	} while (t);

	/*
	 * We've already gotten threads number in this core, update the SMT
	 * threads number when necessary.
	 */
	if (i > topology_smt_get_num_threads())
		topology_smt_set_num_threads(i);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
@@ -743,6 +750,36 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
	return &cpu_topology[cpu].cluster_sibling;
}

#ifdef CONFIG_HOTPLUG_SMT

/*
 * Maximum number of hardware threads found in any single core so far.
 * Defaults to 1 (no SMT) until boot-time topology parsing (ACPI or OF)
 * discovers a core with more threads.
 */
static unsigned int topology_smt_num_threads = 1;

/*
 * Record the maximum SMT threads-per-core discovered by topology parsing.
 * __init only: meant to be called during early boot, before the value is
 * handed to the core SMT control code via cpu_smt_set_num_threads().
 */
void __init topology_smt_set_num_threads(unsigned int num_threads)
{
	topology_smt_num_threads = num_threads;
}

/* Return the maximum SMT threads-per-core recorded so far (__init only). */
unsigned int __init topology_smt_get_num_threads(void)
{
	return topology_smt_num_threads;
}

/*
 * During SMT hotplug the primary thread of a core is never offlined. x86
 * reserves a dedicated primary thread for special purposes, but arm64 and
 * similar architectures have no such restriction, so simply treat the
 * lowest-numbered CPU in each SMT sibling group as the primary thread.
 */
bool topology_is_primary_thread(unsigned int cpu)
{
	return cpu == cpumask_first(topology_sibling_cpumask(cpu));
}
#endif

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -855,6 +892,14 @@ void __init init_cpu_topology(void)
		reset_cpu_topology();
	}

	/*
	 * By this stage we get to know whether we support SMT or not, update
	 * the information for the core. We don't support
	 * CONFIG_SMT_NUM_THREADS_DYNAMIC so make the max_threads == num_threads.
	 */
	cpu_smt_set_num_threads(topology_smt_get_num_threads(),
				topology_smt_get_num_threads());

	for_each_possible_cpu(cpu) {
		ret = fetch_cache_info(cpu);
		if (!ret)
+14 −0
Original line number Diff line number Diff line
@@ -92,6 +92,20 @@ void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
int parse_acpi_topology(void);

#ifdef CONFIG_HOTPLUG_SMT
/* True if @cpu is the primary (never-offlined) thread of its SMT group. */
bool topology_is_primary_thread(unsigned int cpu);
/* Record/query the maximum threads-per-core found during topology parsing. */
void topology_smt_set_num_threads(unsigned int num_threads);
unsigned int topology_smt_get_num_threads(void);
#else
/* Without HOTPLUG_SMT, no thread is primary and cores report one thread. */
static inline bool topology_is_primary_thread(unsigned int cpu) { return false; }
static inline void topology_smt_set_num_threads(unsigned int num_threads) { }
static inline unsigned int topology_smt_get_num_threads(void)
{
	return 1;
}
#endif

#endif

#endif /* _LINUX_ARCH_TOPOLOGY_H_ */