Commit d025b7ba authored by Peter Zijlstra, committed by Borislav Petkov (AMD)

x86/cpu: Rename original retbleed methods

Rename the original retbleed return thunk and untrain_ret to
retbleed_return_thunk() and retbleed_untrain_ret().

No functional changes.

Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org
parent d43490d0
arch/x86/include/asm/nospec-branch.h  +4 −4
@@ -272,7 +272,7 @@
 .endm
 
 #ifdef CONFIG_CPU_UNRET_ENTRY
-#define CALL_ZEN_UNTRAIN_RET	"call zen_untrain_ret"
+#define CALL_ZEN_UNTRAIN_RET	"call retbleed_untrain_ret"
 #else
 #define CALL_ZEN_UNTRAIN_RET	""
 #endif
@@ -282,7 +282,7 @@
  * return thunk isn't mapped into the userspace tables (then again, AMD
  * typically has NO_MELTDOWN).
  *
- * While zen_untrain_ret() doesn't clobber anything but requires stack,
+ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
  * entry_ibpb() will clobber AX, CX, DX.
  *
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
@@ -347,11 +347,11 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
-extern void zen_return_thunk(void);
+extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);
 
-extern void zen_untrain_ret(void);
+extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_untrain_ret_alias(void);

arch/x86/kernel/cpu/bugs.c  +1 −1
@@ -1043,7 +1043,7 @@ static void __init retbleed_select_mitigation(void)
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
 		if (IS_ENABLED(CONFIG_RETHUNK))
-			x86_return_thunk = zen_return_thunk;
+			x86_return_thunk = retbleed_return_thunk;
 
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
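
For context, not part of the commit: x86_return_thunk records which return thunk the kernel should use, and retbleed_select_mitigation() assigns it once during boot. A minimal sketch of that select-once pattern in plain C, with hypothetical names throughout:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's return thunks. */
static void default_thunk(void)  { puts("default return thunk"); }
static void retbleed_thunk(void) { puts("retbleed return thunk"); }

/* Mirrors the role of x86_return_thunk: a single selector, written once
 * during setup, that names the thunk returns should funnel through. */
static void (*return_thunk)(void) = default_thunk;

static void select_mitigation(int unret_needed)
{
	if (unret_needed)
		return_thunk = retbleed_thunk;	/* cf. x86_return_thunk = retbleed_return_thunk */
}

int main(void)
{
	select_mitigation(1);
	return_thunk();		/* prints "retbleed return thunk" */
	return 0;
}

In the kernel itself the pointer is consumed by patching machinery rather than called directly, so this illustrates only the selection, not the dispatch.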
arch/x86/kernel/vmlinux.lds.S  +1 −1
@@ -521,7 +521,7 @@ INIT_PER_CPU(irq_stack_backing_store);
 #endif
 
 #ifdef CONFIG_RETHUNK
-. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
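
An aside on the assertion above, not part of the commit: since 64 is a power of two, an address is 64-byte (cacheline) aligned exactly when its low six bits are zero, which is what the & 0x3f mask tests. A tiny standalone C check, with a hypothetical helper name:

#include <assert.h>
#include <stdint.h>

/* 0x3f == 64 - 1: masking with it keeps the low six bits, which must all
 * be zero for an address sitting on a 64-byte boundary. */
static int is_cacheline_aligned(uintptr_t addr)
{
	return (addr & 0x3f) == 0;
}

int main(void)
{
	assert(is_cacheline_aligned(0x1000));	/* 4 KiB boundary: aligned */
	assert(!is_cacheline_aligned(0x1001));	/* one byte past it: not aligned */
	return 0;
}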

arch/x86/lib/retpoline.S  +15 −15
@@ -188,32 +188,32 @@ SYM_CODE_END(srso_alias_return_thunk)
 
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
  *    alignment within the BTB.
- * 2) The instruction at zen_untrain_ret must contain, and not
+ * 2) The instruction at retbleed_untrain_ret must contain, and not
  *    end with, the 0xc3 byte of the RET.
  * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
  *    from re-poisoning the BTB prediction.
  */
 	.align 64
-	.skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+	.skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
+SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
-	 * As executed from zen_untrain_ret, this is:
+	 * As executed from retbleed_untrain_ret, this is:
 	 *
 	 *   TEST $0xcc, %bl
 	 *   LFENCE
-	 *   JMP zen_return_thunk
+	 *   JMP retbleed_return_thunk
 	 *
 	 * Executing the TEST instruction has a side effect of evicting any BTB
 	 * prediction (potentially attacker controlled) attached to the RET, as
-	 * zen_return_thunk + 1 isn't an instruction boundary at the moment.
+	 * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
 	 */
 	.byte	0xf6
 
 	/*
-	 * As executed from zen_return_thunk, this is a plain RET.
+	 * As executed from retbleed_return_thunk, this is a plain RET.
 	 *
 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
 	 *
@@ -225,13 +225,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
 	 * RET's prediction to a type of its choice, but can evict the
 	 * prediction due to competitive sharing. If the prediction is
-	 * evicted, zen_return_thunk will suffer Straight Line Speculation
+	 * evicted, retbleed_return_thunk will suffer Straight Line Speculation
	 * which will be contained safely by the INT3.
 	 */
-SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
+SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
 	ret
 	int3
-SYM_CODE_END(zen_return_thunk)
+SYM_CODE_END(retbleed_return_thunk)
 
 	/*
 	 * Ensure the TEST decoding / BTB invalidation is complete.
@@ -242,13 +242,13 @@ SYM_CODE_END(zen_return_thunk)
 	 * Jump back and execute the RET in the middle of the TEST instruction.
 	 * INT3 is for SLS protection.
 	 */
-	jmp zen_return_thunk
+	jmp retbleed_return_thunk
 	int3
-SYM_FUNC_END(zen_untrain_ret)
-__EXPORT_THUNK(zen_untrain_ret)
+SYM_FUNC_END(retbleed_untrain_ret)
+__EXPORT_THUNK(retbleed_untrain_ret)
 
 /*
- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
  * above. On kernel entry, srso_untrain_ret() is executed which is a
  *
  * movabs $0xccccc30824648d48,%rax
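
To make the overlap in the hunk above concrete (a sketch, not part of the commit): the three bytes f6 c3 cc decode as a single TEST instruction when entered at retbleed_untrain_ret, but as RET followed by INT3 when entered one byte later at retbleed_return_thunk. A small C program spelling out the two decodings:

#include <stdio.h>

/* The three overlapping bytes the assembly above emits:
 *   entered at +0: f6 c3 cc  =>  test $0xcc, %bl   (c3 is the ModRM byte,
 *                                                    cc the imm8)
 *   entered at +1: c3        =>  ret
 *                  cc        =>  int3
 */
static const unsigned char overlap[] = { 0xf6, 0xc3, 0xcc };

int main(void)
{
	printf("at +0: test $0x%02x, %%bl  (bytes %02x %02x %02x)\n",
	       overlap[2], overlap[0], overlap[1], overlap[2]);
	printf("at +1: ret (%02x) then int3 (%02x)\n", overlap[1], overlap[2]);
	return 0;
}

This is the layout that requirement 2) in the comment describes: the instruction at retbleed_untrain_ret contains, but does not end with, the RET's 0xc3 byte.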
tools/objtool/arch/x86/decode.c  +1 −1
@@ -829,6 +829,6 @@ bool arch_is_rethunk(struct symbol *sym)
 
 bool arch_is_embedded_insn(struct symbol *sym)
 {
-	return !strcmp(sym->name, "zen_return_thunk") ||
+	return !strcmp(sym->name, "retbleed_return_thunk") ||
 	       !strcmp(sym->name, "srso_safe_ret");
 }