Commit 3e3f0695 authored by Peter Zijlstra

x86/ibt: Annotate text references

Annotate away some of the generic code references. These are places
where we take the address of a symbol for exception handling or as a
return address (e.g. context switch).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.877758523@infradead.org
parent fe379fa4
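
For context: with CONFIG_X86_KERNEL_IBT, objtool treats every address-taken
code symbol as a potential indirect-branch target and expects it to begin
with ENDBR; ANNOTATE_NOENDBR records the places where that expectation is
deliberately waived because the address is never used for an indirect
CALL/JMP. A minimal userspace sketch of the underlying hardware rule,
illustrative only and not part of this commit (ENDBR64 decodes as a NOP
wherever CET is inactive, so this builds and runs on any x86-64 machine):

#include <stdio.h>

static void indirect_target(void)
{
	/* ENDBR64 by hand, just to show the encoding is harmless;
	 * a -fcf-protection=branch build places one at function entry. */
	__asm__ volatile ("endbr64");
	puts("reached via indirect call");
}

int main(void)
{
	/* Taking the address makes indirect_target() an indirect-branch
	 * target: exactly the property objtool checks for, and what
	 * ANNOTATE_NOENDBR opts a code location out of. */
	void (*fp)(void) = indirect_target;

	fp();
	return 0;
}
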
arch/x86/entry/entry_64.S: +6 −0
@@ -277,6 +277,7 @@ SYM_FUNC_END(__switch_to_asm)
 .pushsection .text, "ax"
 SYM_CODE_START(ret_from_fork)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // copy_thread
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */
 
@@ -569,6 +570,7 @@ __irqentry_text_start:
 	.align 16
 	.globl __irqentry_text_end
 __irqentry_text_end:
+	ANNOTATE_NOENDBR
 
 SYM_CODE_START_LOCAL(common_interrupt_return)
 SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
@@ -650,6 +652,7 @@ SYM_INNER_LABEL(early_xen_iret_patch, SYM_L_GLOBAL)
 #endif
 
 SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // exc_double_fault
 	/*
 	 * This may fault.  Non-paranoid faults on return to userspace are
 	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
@@ -744,6 +747,7 @@ SYM_FUNC_START(asm_load_gs_index)
 	FRAME_BEGIN
 	swapgs
 .Lgs_change:
+	ANNOTATE_NOENDBR // error_entry
 	movl	%edi, %gs
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	swapgs
@@ -1322,6 +1326,7 @@ first_nmi:
 #endif
 
 repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 	/*
 	 * If there was a nested NMI, the first NMI's iret will return
 	 * here. But NMIs are still enabled and we can take another
@@ -1350,6 +1355,7 @@ repeat_nmi:
 	.endr
 	subq	$(5*8), %rsp
 end_repeat_nmi:
+	ANNOTATE_NOENDBR // this code
 
 	/*
 	 * Everything below this point can be preempted by a nested NMI.
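
The "// copy_thread" note above marks where ret_from_fork's address is
taken: copy_thread() in arch/x86/kernel/process.c stores it as the new
task's saved return address, so the label is only ever reached by RET,
which IBT does not police. Paraphrased for reference, not part of this
diff:

	/* paraphrase of copy_thread(), arch/x86/kernel/process.c */
	frame->bp       = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
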
arch/x86/entry/entry_64_compat.S: +1 −0
@@ -148,6 +148,7 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
 	popfq
 	jmp	.Lsysenter_flags_fixed
 SYM_INNER_LABEL(__end_entry_SYSENTER_compat, SYM_L_GLOBAL)
+	ANNOTATE_NOENDBR // is_sysenter_singlestep
 SYM_CODE_END(entry_SYSENTER_compat)
 
 /*
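
Here, too, the address is only data: is_sysenter_singlestep() in
arch/x86/kernel/traps.c uses __end_entry_SYSENTER_compat purely to
range-check a trapping IP, never as a branch target. Paraphrased, not
part of this diff:

	/* paraphrase of is_sysenter_singlestep(), arch/x86/kernel/traps.c */
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
	       (unsigned long)__end_entry_SYSENTER_compat -
	       (unsigned long)entry_SYSENTER_compat;
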
arch/x86/kernel/alternative.c: +8 −2
@@ -713,6 +713,7 @@ asm (
 "	.pushsection	.init.text, \"ax\", @progbits\n"
 "	.type		int3_magic, @function\n"
 "int3_magic:\n"
+	ANNOTATE_NOENDBR
 "	movl	$1, (%" _ASM_ARG1 ")\n"
 	ASM_RET
 "	.size		int3_magic, .-int3_magic\n"
@@ -724,16 +725,19 @@ extern void int3_selftest_ip(void); /* defined in asm below */
 static int __init
 int3_exception_notify(struct notifier_block *self, unsigned long val, void *data)
 {
+	unsigned long selftest = (unsigned long)&int3_selftest_ip;
 	struct die_args *args = data;
 	struct pt_regs *regs = args->regs;
 
+	OPTIMIZER_HIDE_VAR(selftest);
+
 	if (!regs || user_mode(regs))
 		return NOTIFY_DONE;
 
 	if (val != DIE_INT3)
 		return NOTIFY_DONE;
 
-	if (regs->ip - INT3_INSN_SIZE != (unsigned long)&int3_selftest_ip)
+	if (regs->ip - INT3_INSN_SIZE != selftest)
 		return NOTIFY_DONE;
 
 	int3_emulate_call(regs, (unsigned long)&int3_magic);
@@ -757,7 +761,9 @@ static noinline void __init int3_selftest(void)
 	 * INT3 padded with NOP to CALL_INSN_SIZE. The int3_exception_nb
 	 * notifier above will emulate CALL for us.
 	 */
-	asm volatile ("int3_selftest_ip: int3; nop; nop; nop; nop\n\t"
+	asm volatile ("int3_selftest_ip:\n\t"
+		      ANNOTATE_NOENDBR
+		      "    int3; nop; nop; nop; nop\n\t"
 		      : ASM_CALL_CONSTRAINT
 		      : __ASM_SEL_RAW(a, D) (&val)
 		      : "memory");
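
OPTIMIZER_HIDE_VAR() launders a value through an empty asm so the
optimizer must treat it as opaque; here it keeps the compiler from
folding the int3_selftest_ip address computation back into the
comparison. A standalone sketch of the idiom, using the same shape as
the kernel's definition in include/linux/compiler.h (illustrative only):

#include <stdio.h>

/* same shape as the kernel's include/linux/compiler.h definition */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

int main(void)
{
	unsigned long addr = (unsigned long)&main;

	OPTIMIZER_HIDE_VAR(addr);	/* 'addr' is now an opaque register value */
	printf("%#lx\n", addr);
	return 0;
}
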
arch/x86/kernel/head_64.S: +4 −0
@@ -99,6 +99,7 @@ SYM_CODE_END(startup_64)
 
 SYM_CODE_START(secondary_startup_64)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 	/*
 	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
 	 * and someone has loaded a mapped page table.
@@ -127,6 +128,7 @@ SYM_CODE_START(secondary_startup_64)
 	 */
 SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR
 
 	/*
 	 * Retrieve the modifier (SME encryption mask if SME is active) to be
@@ -192,6 +194,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	jmp	*%rax
 1:
 	UNWIND_HINT_EMPTY
+	ANNOTATE_NOENDBR // above
 
 	/*
 	 * We must switch to a new descriptor in kernel space for the GDT
@@ -299,6 +302,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
 	pushq	%rax		# target address in negative space
 	lretq
.Lafter_lret:
+	ANNOTATE_NOENDBR
 SYM_CODE_END(secondary_startup_64)
 
 #include "verify_cpu.S"
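
The "// above" annotation belongs to the indirect "jmp *%rax" a few
lines up, which hops from the identity mapping to the label's kernel
virtual address; this runs before IBT has been switched on for the CPU,
so the target needs no ENDBR. The closest C analogue of an address-taken
label reached by a known indirect jump is a computed goto (GCC/Clang
extension; illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	void *target = &&resume;	/* address-taken code label */

	goto *target;			/* indirect jump, like jmp *%rax */
resume:
	puts("continued at the address-taken label");
	return 0;
}
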
arch/x86/kernel/kprobes/core.c: +1 −0
@@ -1033,6 +1033,7 @@ asm(
 	".type __kretprobe_trampoline, @function\n"
 	"__kretprobe_trampoline:\n"
 #ifdef CONFIG_X86_64
+	ANNOTATE_NOENDBR
 	/* Push a fake return address to tell the unwinder it's a kretprobe. */
 	"	pushq $__kretprobe_trampoline\n"
 	UNWIND_HINT_FUNC
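
__kretprobe_trampoline is entered by returning into it through a
hijacked return address, and the pushq above stores its address only as
a sentinel for the unwinder to compare against; IBT polices indirect
CALL/JMP, not RET, hence no ENDBR. The general shape of a code address
used purely as a comparison token (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>

static void marker(void) { }	/* address used only as a token */

static int is_marker(uintptr_t ip)
{
	return ip == (uintptr_t)&marker;	/* compared, never jumped to */
}

int main(void)
{
	printf("%d\n", is_marker((uintptr_t)&marker));	/* prints 1 */
	return 0;
}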