Commit 1b5277c0 authored by Borislav Petkov (AMD)
Browse files

x86/srso: Add SRSO_NO support



Add support for the CPUID flag which denotes that the CPU is not
affected by SRSO.

Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
parent 79113e40
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -445,7 +445,9 @@
#define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* "" SMM_CTL MSR is not present */

#define X86_FEATURE_SBPB		(20*32+27) /* "" Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */

/*
 * BUG word(s)
+1 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@

#define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB			BIT(0)	   /* Indirect Branch Prediction Barrier */
#define PRED_CMD_SBPB			BIT(7)	   /* Selective Branch Prediction Barrier */

#define MSR_PPIN_CTL			0x0000004e
#define MSR_PPIN			0x0000004f
+3 −3
Original line number Diff line number Diff line
@@ -492,11 +492,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
+6 −6
Original line number Diff line number Diff line
@@ -1240,12 +1240,12 @@ bool cpu_has_ibpb_brtype_microcode(void)
{
	u8 fam = boot_cpu_data.x86;

	if (fam == 0x17) {
	/* Zen1/2 IBPB flushes branch type predictions too. */
	if (fam == 0x17)
		return boot_cpu_has(X86_FEATURE_AMD_IBPB);
	} else if (fam == 0x19) {
		return false;
	}

	/* Poke the MSR bit on Zen3/4 to check its presence. */
	else if (fam == 0x19)
		return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB);
	else
		return false;
}
+20 −4
Original line number Diff line number Diff line
@@ -57,6 +57,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

static DEFINE_MUTEX(spec_ctrl_mutex);

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
@@ -2236,7 +2239,7 @@ static void __init srso_select_mitigation(void)
	bool has_microcode;

	if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
		return;
		goto pred_cmd;

	/*
	 * The first check is for the kernel running as a guest in order
@@ -2249,9 +2252,18 @@ static void __init srso_select_mitigation(void)
	} else {
		/*
		 * Enable the synthetic (even if in a real CPUID leaf)
		 * flag for guests.
		 * flags for guests.
		 */
		setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		setup_force_cpu_cap(X86_FEATURE_SBPB);

		/*
		 * Zen1/2 with SMT off aren't vulnerable after the right
		 * IBPB microcode has been applied.
		 */
		if ((boot_cpu_data.x86 < 0x19) &&
		    (cpu_smt_control == CPU_SMT_DISABLED))
			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
	}

	switch (srso_cmd) {
@@ -2274,16 +2286,20 @@ static void __init srso_select_mitigation(void)
			srso_mitigation = SRSO_MITIGATION_SAFE_RET;
		} else {
			pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
			return;
			goto pred_cmd;
		}
		break;

	default:
		break;

	}

	pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));

pred_cmd:
	if (boot_cpu_has(X86_FEATURE_SRSO_NO) ||
	    srso_cmd == SRSO_CMD_OFF)
		x86_pred_cmd = PRED_CMD_SBPB;
}

#undef pr_fmt
Loading