Commit e7f89001 authored by Thomas Gleixner

x86/irq: Sanitize irq stack tracking



The recursion protection for hard interrupt stacks is an unsigned int per-CPU
variable named irq_count, initialized to -1.

The irq stack switching is only done when the variable is -1, which creates
worse code than just checking for 0. When the stack switching happens it
uses this_cpu_add/sub(1), but there is no reason to do so; it can simply
use straight writes. This is a historical leftover from the low-level ASM
code, which used inc and jz to make the decision.

Rename it to hardirq_stack_inuse, make it a bool and use plain stores.
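
For illustration, a minimal userspace sketch of the before/after shape. The
per-CPU variables are modeled as plain globals and the enter/exit helpers
are hypothetical names, so this is an analogy to the patch, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Old scheme: unsigned counter starting at -1, adjusted with add/sub. */
static unsigned int irq_count = -1;

static bool irqstack_active_old(void)
{
	return irq_count != -1;		/* load plus compare against -1 */
}

static void enter_old(void) { irq_count += 1; }	/* was this_cpu_add() */
static void exit_old(void)  { irq_count -= 1; }	/* was this_cpu_sub() */

/* New scheme: a bool tested and set with plain loads and stores. */
static bool hardirq_stack_inuse;

static bool irqstack_active_new(void)
{
	return hardirq_stack_inuse;	/* plain load, nothing to compare */
}

static void enter_new(void) { hardirq_stack_inuse = true; }
static void exit_new(void)  { hardirq_stack_inuse = false; }

int main(void)
{
	enter_old();
	enter_new();
	printf("old: %d new: %d\n",
	       irqstack_active_old(), irqstack_active_new());
	exit_old();
	exit_new();
	return 0;
}

The old predicate needs a load and a compare against -1; the new one returns
the loaded value directly, which is the code generation improvement the
changelog refers to.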

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20210210002512.228830141@linutronix.de
parent 15f720aa
arch/x86/include/asm/irq_stack.h +7 −7
@@ -9,7 +9,7 @@
 #ifdef CONFIG_X86_64
 static __always_inline bool irqstack_active(void)
 {
-	return __this_cpu_read(irq_count) != -1;
+	return __this_cpu_read(hardirq_stack_inuse);
 }
 
 void asm_call_on_stack(void *sp, void (*func)(void), void *arg);
@@ -22,9 +22,9 @@ static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-	__this_cpu_add(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, true);
 	asm_call_on_stack(tos - 8, func, NULL);
-	__this_cpu_sub(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, false);
 }
 
 static __always_inline void
@@ -33,9 +33,9 @@ __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs),
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-	__this_cpu_add(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, true);
 	asm_call_sysvec_on_stack(tos - 8, func, regs);
-	__this_cpu_sub(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, false);
 }
 
 static __always_inline void
@@ -44,9 +44,9 @@ __run_irq_on_irqstack(void (*func)(struct irq_desc *desc),
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
 
-	__this_cpu_add(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, true);
 	asm_call_irq_on_stack(tos - 8, func, desc);
-	__this_cpu_sub(irq_count, 1);
+	__this_cpu_write(hardirq_stack_inuse, false);
 }
 
 #else /* CONFIG_X86_64 */
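
The three wrappers above differ only in which asm trampoline they call and
in the argument type that trampoline forwards; each one marks the stack in
use, calls the trampoline with an 8-byte offset below the top of the stack,
and clears the mark again. Purely as an illustration of that shared shape,
it could be written once as a macro; call_on_irqstack() is a hypothetical
name for this sketch, and the kernel keeps the three explicit functions,
presumably so each trampoline retains a typed prototype:

/* Kernel-context sketch, not standalone code: the per-CPU accessors and
 * trampolines are the ones used in the functions above. */
#define call_on_irqstack(asm_call, func, arg)				\
do {									\
	void *tos = __this_cpu_read(hardirq_stack_ptr);			\
									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	asm_call(tos - 8, func, arg);					\
	__this_cpu_write(hardirq_stack_inuse, false);			\
} while (0)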
arch/x86/include/asm/processor.h +1 −1
@@ -454,7 +454,7 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
 	return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
 }
 
-DECLARE_PER_CPU(unsigned int, irq_count);
+DECLARE_PER_CPU(bool, hardirq_stack_inuse);
 extern asmlinkage void ignore_sysret(void);
 
 /* Save actual FS/GS selectors and bases to current->thread */
arch/x86/kernel/cpu/common.c +1 −1
@@ -1740,7 +1740,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
 EXPORT_PER_CPU_SYMBOL(current_task);
 
 DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
-DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
+DEFINE_PER_CPU(bool, hardirq_stack_inuse);
 
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
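
Note that the conversion also drops the initializer: the counter had to
start at -1 to mean "not in use", while per-CPU storage is zero-filled, so
false is already the idle state. The __visible annotation disappears as
well, since it was only needed while low-level ASM code referenced the
variable. A minimal sketch of the declare/define split, using plain extern
linkage as a stand-in for the per-CPU machinery (an assumption of the
sketch, not how per-CPU variables are implemented):

#include <stdbool.h>

/* header, analogous to the DECLARE_PER_CPU() line in processor.h above */
extern bool hardirq_stack_inuse;

/* exactly one source file, analogous to the DEFINE_PER_CPU() line in
 * cpu/common.c above; static storage is zero-initialized, so no
 * "= -1" style initializer is needed */
bool hardirq_stack_inuse;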
arch/x86/kernel/process_64.c +1 −1
@@ -539,7 +539,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	int cpu = smp_processor_id();
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(irq_count) != -1);
+		     this_cpu_read(hardirq_stack_inuse));
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_fpu, cpu);
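
The debug assertion above enforces that a task is never switched out while
the CPU is still running on the hardirq stack; with the bool it reduces to
a plain per-CPU load instead of a load-and-compare. For illustration, a
userspace stand-in for the warn-once pattern (this WARN_ON_ONCE is a local
model relying on GNU statement expressions, not the kernel macro):

#include <stdbool.h>
#include <stdio.h>

/* Local model of the kernel's warn-once pattern: print on the first true
 * evaluation, stay silent afterwards. */
#define WARN_ON_ONCE(cond)						\
({									\
	static bool warned;						\
	bool now = (cond);						\
	if (now && !warned) {						\
		warned = true;						\
		fprintf(stderr, "WARNING: %s\n", #cond);		\
	}								\
	now;								\
})

int main(void)
{
	bool hardirq_stack_inuse = true;
	int i;

	for (i = 0; i < 3; i++)
		WARN_ON_ONCE(hardirq_stack_inuse);	/* warns only once */
	return 0;
}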