Unverified Commit d7903642 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!2431 Backport microcode patches from upstream 5.10.173

Merge Pull Request from: @allen-shi 
 
This PR is to backport microcode related patches from upstream 5.10.173.

The first 4 commits are from upstream 5.10.173:
8e83e1619fac9151b1515177f5066ae67b0cbda2 x86/microcode: Adjust late loading result reporting message
511e27e5fdd658e6cb06b4947fb0d3ac76163776 x86/microcode: Check CPU capabilities after late microcode update correctly
89e848bb4aa140e701eb0d017736ce5d1ee198da x86/microcode: Add a parameter to microcode_check() to store CPU capabilities
e6230806681fa25b7a3829c021d5b33d68c9bd09 x86/microcode: Default-disable late loading

The 5th commit enables microcode late loading by default for x86 in openeuler_defconfig.

 **Intel-Kernel Issue** 
[#I873BU](https://gitee.com/openeuler/intel-kernel/issues/I873BU)

 **Test** 
Built and ran the kernel successfully.
Microcode late loading function is verified.

 **Known Issue** 
N/A

 **Default config change** 
+CONFIG_MICROCODE_LATE_LOADING=y 
 
Link: https://gitee.com/openeuler/kernel/pulls/2431

 

Reviewed-by: default avatarJason Zeng <jason.zeng@intel.com>
Reviewed-by: default avatarsanglipeng <sanglipeng1@jd.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents ab2aaa31 9ed60efc
Loading
Loading
Loading
Loading
+11 −0
Original line number Diff line number Diff line
@@ -1357,6 +1357,17 @@ config MICROCODE_OLD_INTERFACE
	  should've switched to the early loading method with the initrd or
	  builtin microcode by now: Documentation/x86/microcode.rst

config MICROCODE_LATE_LOADING
	bool "Late microcode loading (DANGEROUS)"
	default n
	depends on MICROCODE
	help
	  Loading microcode late, when the system is up and executing instructions
	  is a tricky business and should be avoided if possible. Just the sequence
	  of synchronizing all cores and SMT threads is one fragile dance which does
	  not guarantee that cores might not softlock after the loading. Therefore,
	  use this at your own risk. Late loading taints the kernel too.

config X86_MSR
	tristate "/dev/cpu/*/msr - Model-specific register support"
	help
+1 −0
Original line number Diff line number Diff line
@@ -414,6 +414,7 @@ CONFIG_MICROCODE=y
CONFIG_MICROCODE_INTEL=y
CONFIG_MICROCODE_AMD=y
CONFIG_MICROCODE_OLD_INTERFACE=y
CONFIG_MICROCODE_LATE_LOADING=y
CONFIG_X86_MSR=y
CONFIG_X86_CPUID=y
CONFIG_X86_5LEVEL=y
+2 −1
Original line number Diff line number Diff line
@@ -874,7 +874,8 @@ bool xen_set_default_idle(void);
#endif

void __noreturn stop_this_cpu(void *dummy);
void microcode_check(void);
void microcode_check(struct cpuinfo_x86 *prev_info);
void store_cpu_caps(struct cpuinfo_x86 *info);

enum l1tf_mitigations {
	L1TF_MITIGATION_OFF,
+32 −14
Original line number Diff line number Diff line
@@ -2176,36 +2176,54 @@ void cpu_init_secondary(void)
}
#endif

/*
#ifdef CONFIG_MICROCODE_LATE_LOADING
/**
 * store_cpu_caps() - Store a snapshot of CPU capabilities
 * @curr_info: Pointer where to store it
 *
 * Returns: None
 */
void store_cpu_caps(struct cpuinfo_x86 *curr_info)
{
	/* Reload CPUID max function as it might've changed. */
	curr_info->cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leafs and pick up the synthetic ones.
	 * Copying from boot_cpu_data first preserves the synthetic
	 * (software-defined) bits; the hardware CPUID-derived leafs are
	 * then overwritten by the get_cpu_cap() call below.
	 */
	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
	       sizeof(curr_info->x86_capability));

	/* Get the hardware CPUID leafs */
	get_cpu_cap(curr_info);
}

/**
 * microcode_check() - Check if any CPU capabilities changed after an update.
 * @prev_info:	CPU capabilities stored before an update.
 *
 * The microcode loader calls this upon late microcode load to recheck features,
 * only when microcode has been updated. Caller holds microcode_mutex and CPU
 * hotplug lock.
 *
 * Return: None
 */
void microcode_check(void)
void microcode_check(struct cpuinfo_x86 *prev_info)
{
	struct cpuinfo_x86 info;
	struct cpuinfo_x86 curr_info;

	perf_check_microcode();

	amd_check_microcode();
	/* Reload CPUID max function as it might've changed. */
	info.cpuid_level = cpuid_eax(0);

	/*
	 * Copy all capability leafs to pick up the synthetic ones so that
	 * memcmp() below doesn't fail on that. The ones coming from CPUID will
	 * get overwritten in get_cpu_cap().
	 */
	memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
	store_cpu_caps(&curr_info);

	get_cpu_cap(&info);

	if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
		    sizeof(prev_info->x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
#endif

/*
 * Invoked from core CPU hotplug code after hotplug operations
+21 −6
Original line number Diff line number Diff line
@@ -503,6 +503,7 @@ static void __exit microcode_dev_exit(void)
/* fake device for request_firmware */
static struct platform_device	*microcode_pdev;

#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
@@ -618,16 +619,26 @@ static int __reload_late(void *info)
static int microcode_reload_late(void)
{
	int old = boot_cpu_data.microcode, ret;
	struct cpuinfo_x86 prev_info;

	atomic_set(&late_cpus_in,  0);
	atomic_set(&late_cpus_out, 0);

	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
	if (ret == 0)
		microcode_check();
	/*
	 * Take a snapshot before the microcode update in order to compare and
	 * check whether any bits changed after an update.
	 */
	store_cpu_caps(&prev_info);

	pr_info("Reload completed, microcode revision: 0x%x -> 0x%x\n",
	ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
	if (!ret) {
		pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n",
			old, boot_cpu_data.microcode);
		microcode_check(&prev_info);
	} else {
		pr_info("Reload failed, current microcode revision: 0x%x\n",
			boot_cpu_data.microcode);
	}

	return ret;
}
@@ -671,6 +682,9 @@ static ssize_t reload_store(struct device *dev,
	return ret;
}

static DEVICE_ATTR_WO(reload);
#endif

static ssize_t version_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
@@ -687,7 +701,6 @@ static ssize_t pf_show(struct device *dev,
	return sprintf(buf, "0x%x\n", uci->cpu_sig.pf);
}

static DEVICE_ATTR_WO(reload);
static DEVICE_ATTR(version, 0444, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);

@@ -840,7 +853,9 @@ static int mc_cpu_down_prep(unsigned int cpu)
}

static struct attribute *cpu_root_microcode_attrs[] = {
#ifdef CONFIG_MICROCODE_LATE_LOADING
	&dev_attr_reload.attr,
#endif
	NULL
};