Commit 30f89e52 authored by Juergen Gross's avatar Juergen Gross Committed by Borislav Petkov
Browse files

x86/cacheinfo: Switch cache_ap_init() to hotplug callback



Instead of explicitly calling cache_ap_init() in
identify_secondary_cpu(), use a CPU hotplug callback. By
registering the callback only after having started the non-boot CPUs
and initializing cache_aps_delayed_init with "true", calling
set_cache_aps_delayed_init() at boot time can be dropped.

It should be noted that this change results in cache_ap_init() being
called a little bit later when hotplugging CPUs. By using a new
hotplug slot right at the start of the low level bringup this is not
problematic, as no operations requiring a specific caching mode are
performed that early in CPU initialization.

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-15-jgross@suse.com


Signed-off-by: Borislav Petkov <bp@suse.de>
parent adfe7512
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -16,7 +16,6 @@ void set_cache_aps_delayed_init(bool val);
bool get_cache_aps_delayed_init(void);
void cache_bp_init(void);
void cache_bp_restore(void);
void cache_ap_init(void);
void cache_aps_init(void);

#endif /* _ASM_X86_CACHEINFO_H */
+15 −3
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
@@ -1139,7 +1140,7 @@ static void cache_cpu_init(void)
	local_irq_restore(flags);
}

static bool cache_aps_delayed_init;
static bool cache_aps_delayed_init = true;

void set_cache_aps_delayed_init(bool val)
{
@@ -1174,10 +1175,10 @@ void cache_bp_restore(void)
		cache_cpu_init();
}

void cache_ap_init(void)
static int cache_ap_init(unsigned int cpu)
{
	if (!memory_caching_control || get_cache_aps_delayed_init())
		return;
		return 0;

	/*
	 * Ideally we should hold mtrr_mutex here to avoid MTRR entries
@@ -1194,6 +1195,8 @@ void cache_ap_init(void)
	 */
	stop_machine_from_inactive_cpu(cache_rendezvous_handler, NULL,
				       cpu_callout_mask);

	return 0;
}

/*
@@ -1207,3 +1210,12 @@ void cache_aps_init(void)
	stop_machine(cache_rendezvous_handler, NULL, cpu_online_mask);
	set_cache_aps_delayed_init(false);
}

/*
 * Register cache_ap_init() as a CPU hotplug callback in the STARTING
 * section, so caching-mode setup runs early in low-level AP bringup for
 * CPUs hotplugged later.
 *
 * The _nocalls variant registers the state without invoking the callback
 * for already-online CPUs; combined with core_initcall() this runs only
 * after the boot-time bringup of the non-boot CPUs (per the commit
 * message), so boot-time APs are instead handled by cache_aps_init().
 * No teardown callback is provided (NULL) — nothing to undo on CPU
 * offline.
 */
static int __init cache_ap_register(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_CACHECTRL_STARTING,
				  "x86/cachectrl:starting",
				  cache_ap_init, NULL);
	return 0;
}
core_initcall(cache_ap_register);
+0 −1
Original line number Diff line number Diff line
@@ -1949,7 +1949,6 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	cache_ap_init();
	validate_apic_and_package_id(c);
	x86_spec_ctrl_setup_ap();
	update_srbds_msr();
+0 −2
Original line number Diff line number Diff line
@@ -1429,8 +1429,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)

	uv_system_init();

	set_cache_aps_delayed_init(true);

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
+1 −0
Original line number Diff line number Diff line
@@ -140,6 +140,7 @@ enum cpuhp_state {
	 */
	CPUHP_AP_IDLE_DEAD,
	CPUHP_AP_OFFLINE,
	CPUHP_AP_CACHECTRL_STARTING,
	CPUHP_AP_SCHED_STARTING,
	CPUHP_AP_RCUTREE_DYING,
	CPUHP_AP_CPU_PM_STARTING,