Commit b6be002b authored by Thomas Gleixner
Browse files

x86/entry: Move nmi entry/exit into common code



Lockdep state handling on NMI enter and exit is nothing specific to X86. It's
not any different on other architectures. Also the extra state type is not
necessary, irqentry_state_t can carry the necessary information as well.

Move it to common code and extend irqentry_state_t to carry lockdep state.

[ Ira: Make exit_rcu and lockdep a union as they are mutually exclusive
  between the IRQ and NMI exceptions, and add kernel documentation for
  struct irqentry_state_t ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201102205320.1458656-7-ira.weiny@intel.com
parent 01be83ee
Loading
Loading
Loading
Loading
+0 −34
Original line number Diff line number Diff line
@@ -209,40 +209,6 @@ SYSCALL_DEFINE0(ni_syscall)
	return -ENOSYS;
}

/*
 * idtentry_enter_nmi - Establish kernel state on NMI-like entry.
 * @regs: Pointer to the exception entry pt_regs (currently unused in the
 *        body; kept for interface symmetry with other idtentry helpers).
 *
 * Returns the lockdep hardirq-enabled state sampled *before* the NMI
 * bookkeeping runs, so the caller can hand it back to
 * idtentry_exit_nmi() to restore lockdep state on the way out.
 *
 * noinstr: nothing before instrumentation_begin() may be instrumented,
 * because RCU is not watching yet at that point.
 */
noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
{
	/* Snapshot lockdep's view of hardirqs before we turn them "off". */
	bool irq_state = lockdep_hardirqs_enabled();

	__nmi_enter();
	/* Tell lockdep hardirqs are now conceptually disabled ... */
	lockdep_hardirqs_off(CALLER_ADDR0);
	/* ... and that we are inside hardirq (NMI) context. */
	lockdep_hardirq_enter();
	/* Make RCU watch this CPU for the duration of the NMI. */
	rcu_nmi_enter();

	/* Instrumentable calls are only safe after rcu_nmi_enter(). */
	instrumentation_begin();
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
	instrumentation_end();

	return irq_state;
}

/*
 * idtentry_exit_nmi - Undo idtentry_enter_nmi() in exact reverse order.
 * @regs:    Pointer to the exception entry pt_regs (unused in the body;
 *           kept for interface symmetry).
 * @restore: The value returned by the matching idtentry_enter_nmi().
 *           When true, hardirqs were enabled at entry and lockdep's
 *           hardirq-enabled state is re-established here.
 *
 * noinstr: instrumentable work must finish before rcu_nmi_exit(), since
 * RCU stops watching this CPU after that call.
 */
noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
{
	instrumentation_begin();
	ftrace_nmi_exit();
	if (restore) {
		/* Prepare lockdep/tracing for hardirqs-on; the actual
		 * lockdep state flip happens below, outside the
		 * instrumentable section. */
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}
	instrumentation_end();

	/* RCU stops watching; no instrumentation beyond this point. */
	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

#ifdef CONFIG_XEN_PV
#ifndef CONFIG_PREEMPTION
/*
+0 −3
Original line number Diff line number Diff line
@@ -11,9 +11,6 @@

#include <asm/irq_stack.h>

bool idtentry_enter_nmi(struct pt_regs *regs);
void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);

/**
 * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
 *		      No error code pushed by hardware
+3 −3
Original line number Diff line number Diff line
@@ -1983,7 +1983,7 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;

static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
{
	bool irq_state;
	irqentry_state_t irq_state;

	WARN_ON_ONCE(user_mode(regs));

@@ -1995,7 +1995,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
	    mce_check_crashing_cpu())
		return;

	irq_state = idtentry_enter_nmi(regs);
	irq_state = irqentry_nmi_enter(regs);
	/*
	 * The call targets are marked noinstr, but objtool can't figure
	 * that out because it's an indirect call. Annotate it.
@@ -2006,7 +2006,7 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
	if (regs->flags & X86_EFLAGS_IF)
		trace_hardirqs_on_prepare();
	instrumentation_end();
	idtentry_exit_nmi(regs, irq_state);
	irqentry_nmi_exit(regs, irq_state);
}

static __always_inline void exc_machine_check_user(struct pt_regs *regs)
+3 −3
Original line number Diff line number Diff line
@@ -475,7 +475,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);

DEFINE_IDTENTRY_RAW(exc_nmi)
{
	bool irq_state;
	irqentry_state_t irq_state;

	/*
	 * Re-enable NMIs right here when running as an SEV-ES guest. This might
@@ -502,14 +502,14 @@ DEFINE_IDTENTRY_RAW(exc_nmi)

	this_cpu_write(nmi_dr7, local_db_save());

	irq_state = idtentry_enter_nmi(regs);
	irq_state = irqentry_nmi_enter(regs);

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	idtentry_exit_nmi(regs, irq_state);
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(this_cpu_read(nmi_dr7));

+7 −6
Original line number Diff line number Diff line
@@ -405,7 +405,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
	}
#endif

	idtentry_enter_nmi(regs);
	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

@@ -651,12 +651,13 @@ DEFINE_IDTENTRY_RAW(exc_int3)
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		bool irq_state = idtentry_enter_nmi(regs);
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		idtentry_exit_nmi(regs, irq_state);
		irqentry_nmi_exit(regs, irq_state);
	}
}

@@ -851,7 +852,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
	 * includes the entry stack is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	bool irq_state = idtentry_enter_nmi(regs);
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
@@ -908,7 +909,7 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	idtentry_exit_nmi(regs, irq_state);
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}
@@ -926,7 +927,7 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,

	/*
	 * NB: We can't easily clear DR7 here because
	 * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
Loading