Commit e583bffe authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'x86-urgent-2023-09-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:

 - Fix a kexec bug

 - Fix a UML build bug

 - Fix a handful of SRSO related bugs

 - Fix a shadow stack handling bug & robustify related code

* tag 'x86-urgent-2023-09-22' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/shstk: Add warning for shadow stack double unmap
  x86/shstk: Remove useless clone error handling
  x86/shstk: Handle vfork clone failure correctly
  x86/srso: Fix SBPB enablement for spec_rstack_overflow=off
  x86/srso: Don't probe microcode in a guest
  x86/srso: Set CPUID feature bits independently of bug or mitigation status
  x86/srso: Fix srso_show_state() side effect
  x86/asm: Fix build of UML with KASAN
  x86/mm, kexec, ima: Use memblock_free_late() from ima_free_kexec_buffer()
parents 5b47b576 509ff51e
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -105,6 +105,13 @@
	CFI_POST_PADDING					\
	SYM_FUNC_END(__cfi_##name)

/* UML needs to be able to override memcpy() and friends for KASAN. */
#ifdef CONFIG_UML
# define SYM_FUNC_ALIAS_MEMFUNC	SYM_FUNC_ALIAS_WEAK
#else
# define SYM_FUNC_ALIAS_MEMFUNC	SYM_FUNC_ALIAS
#endif

/* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
#define SYM_TYPED_FUNC_START(name)				\
	SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN)	\
+1 −2
Original line number Diff line number Diff line
@@ -186,7 +186,6 @@ do { \
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	if (!tsk->vfork_done)			\
	shstk_free(tsk);			\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
+0 −2
Original line number Diff line number Diff line
@@ -683,13 +683,11 @@ extern u16 get_llc_id(unsigned int cpu);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void)	{ return 0; }
static inline u32 amd_get_highest_perf(void)		{ return 0; }
static inline bool cpu_has_ibpb_brtype_microcode(void)	{ return false; }
static inline void amd_clear_divider(void)		{ }
static inline void amd_check_microcode(void)		{ }
#endif
+9 −19
Original line number Diff line number Diff line
@@ -766,6 +766,15 @@ static void early_init_amd(struct cpuinfo_x86 *c)

	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -1301,25 +1310,6 @@ void amd_check_microcode(void)
	on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

bool cpu_has_ibpb_brtype_microcode(void)
{
	switch (boot_cpu_data.x86) {
	/* Zen1/2 IBPB flushes branch type predictions too. */
	case 0x17:
		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
	case 0x19:
		/* Poke the MSR bit on Zen3/4 to check its presence. */
		if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_SBPB);
			return true;
		} else {
			return false;
		}
	default:
		return false;
	}
}

/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
+3 −14
Original line number Diff line number Diff line
@@ -2404,26 +2404,15 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);

static void __init srso_select_mitigation(void)
{
	bool has_microcode;
	bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
		goto pred_cmd;

	/*
	 * The first check is for the kernel running as a guest in order
	 * for guests to verify whether IBPB is a viable mitigation.
	 */
	has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
	if (!has_microcode) {
		pr_warn("IBPB-extending microcode not applied!\n");
		pr_warn(SRSO_NOTICE);
	} else {
		/*
		 * Enable the synthetic (even if in a real CPUID leaf)
		 * flags for guests.
		 */
		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);

		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
@@ -2444,7 +2433,7 @@ static void __init srso_select_mitigation(void)

	switch (srso_cmd) {
	case SRSO_CMD_OFF:
		return;
		goto pred_cmd;

	case SRSO_CMD_MICROCODE:
		if (has_microcode) {
@@ -2717,7 +2706,7 @@ static ssize_t srso_show_state(char *buf)

	return sysfs_emit(buf, "%s%s\n",
			  srso_strings[srso_mitigation],
			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
			  boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}

static ssize_t gds_show_state(char *buf)
Loading