Commit 59dc5bfc authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: avoid reloading (H)SRR registers if they are still valid



When an interrupt is taken, the SRR registers are set to return to where the
interrupted context left off. Unless they are modified in the meantime, or the
return address or MSR are modified, there is no need to reload these registers
when returning from interrupt.

Introduce per-CPU flags that track the validity of SRR and HSRR
registers. These are cleared when returning from interrupt, when
using the registers for something else (e.g., OPAL calls), when
adjusting the return address or MSR of a context, and when context
switching (which changes the return address and MSR).

This improves the performance of interrupt returns.
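As a rough sketch of the mechanism described above (a hypothetical user-space
model, not the kernel's actual entry/exit paths, which are written in assembly
and also handle the HSRR pair and user vs kernel returns): the srr_valid field
mirrors the flag this patch adds to paca_struct, while regs_like,
interrupt_entry(), set_return_ip() and interrupt_return() are illustrative
stand-ins only.

/* Hypothetical model of the validity-flag idea; not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Saved state of the interrupted context (stands in for pt_regs). */
struct regs_like {
	uint64_t nip;
	uint64_t msr;
};

/* Per-CPU book-keeping (stands in for the paca_struct field). */
static struct {
	uint8_t srr_valid;	/* SRR0/1 still hold regs->nip/regs->msr */
} paca;

/* Stand-in for the mtspr SRR0/SRR1 pair: the work the flag lets us skip. */
static void reload_srr(const struct regs_like *regs)
{
	printf("mtspr SRR0=%#llx SRR1=%#llx\n",
	       (unsigned long long)regs->nip, (unsigned long long)regs->msr);
}

/* On interrupt entry the hardware filled SRR0/1 with the return state,
 * so they start out valid for this context. */
static void interrupt_entry(struct regs_like *regs, uint64_t nip, uint64_t msr)
{
	regs->nip = nip;
	regs->msr = msr;
	paca.srr_valid = 1;
}

/* Changing the return address makes the cached SRR contents stale. */
static void set_return_ip(struct regs_like *regs, uint64_t ip)
{
	regs->nip = ip;
	paca.srr_valid = 0;
}

/* Interrupt return: only rewrite SRR0/1 if something invalidated them,
 * then treat them as consumed, as the commit message describes. */
static void interrupt_return(struct regs_like *regs)
{
	if (!paca.srr_valid)
		reload_srr(regs);
	paca.srr_valid = 0;
	/* rfid would now use SRR0/SRR1 to restore NIP and MSR */
}

int main(void)
{
	struct regs_like regs;

	interrupt_entry(&regs, 0x1234, 0x9032);
	interrupt_return(&regs);		/* mtspr skipped: still valid */

	interrupt_entry(&regs, 0x1234, 0x9032);
	set_return_ip(&regs, 0x5678);		/* return address changed */
	interrupt_return(&regs);		/* mtspr needed: flag cleared */
	return 0;
}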

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fold in fixup patch from Nick]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210617155116.2167984-5-npiggin@gmail.com
parent 1df7d5e4
+4 −0
@@ -85,6 +85,10 @@ config MSI_BITMAP_SELFTEST
 config PPC_IRQ_SOFT_MASK_DEBUG
 	bool "Include extra checks for powerpc irq soft masking"
 
+config PPC_RFI_SRR_DEBUG
+	bool "Include extra checks for RFI SRR register validity"
+	depends on PPC_BOOK3S_64
+
 config XMON
 	bool "Include xmon kernel debugger"
 	depends on DEBUG_KERNEL
+9 −1
@@ -389,7 +389,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !(regs->msr & MSR_EE);
 }
 
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+	return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+	BUILD_BUG();
+}
 
 static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
 {
+13 −1
@@ -73,13 +73,25 @@
 #include <asm/kprobes.h>
 #include <asm/runlatch.h>
 
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void srr_regs_clobbered(void)
+{
+	local_paca->srr_valid = 0;
+	local_paca->hsrr_valid = 0;
+}
+#else
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
 static inline void nap_adjust_return(struct pt_regs *regs)
 {
 #ifdef CONFIG_PPC_970_NAP
 	if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
 		/* Can avoid a test-and-clear because NMIs do not call this */
 		clear_thread_local_flags(_TLF_NAPPING);
-		regs->nip = (unsigned long)power4_idle_nap_return;
+		regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
 	}
 #endif
 }
+1 −1
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
 {
 	struct pt_regs *regs = ftrace_get_regs(fregs);
 
-	regs->nip = ip;
+	regs_set_return_ip(regs, ip);
 }
 
 #define klp_get_ftrace_location klp_get_ftrace_location
+4 −0
@@ -167,6 +167,10 @@ struct paca_struct {
 	u64 saved_msr;			/* MSR saved here by enter_rtas */
 #ifdef CONFIG_PPC_BOOK3E
 	u16 trap_save;			/* Used when bad stack is encountered */
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	u8 hsrr_valid;			/* HSRRs set for HRFID */
+	u8 srr_valid;			/* SRRs set for RFID */
+#endif
 	u8 irq_soft_mask;		/* mask for irq soft masking */
 	u8 irq_happened;		/* irq happened while soft-disabled */