Commit 325678fd authored by Nicholas Piggin, committed by Michael Ellerman
Browse files

powerpc/64s: add a table of implicit soft-masked addresses



Commit 9d1988ca ("powerpc/64: treat low kernel text as irqs
soft-masked") ends up catching too much code, including ret_from_fork,
and parts of interrupt and syscall return that do not expect
interrupts to be soft-masked. If an interrupt gets marked pending,
and then the code proceeds out of the implicit soft-masked region it
will fail to deal with the pending interrupt.

Fix this by adding a new table of addresses which explicitly marks
the regions of code that are soft masked. This table is only checked
for interrupts that occur below __end_soft_masked, so most kernel interrupts
will not have the overhead of the table search.

Fixes: 9d1988ca ("powerpc/64: treat low kernel text as irqs soft-masked")
Reported-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210630074621.2109197-5-npiggin@gmail.com
parent 9b69d48c
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -75,6 +75,7 @@

#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);

DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
@@ -87,7 +88,7 @@ static inline bool is_implicit_soft_masked(struct pt_regs *regs)
	if (regs->nip >= (unsigned long)__end_soft_masked)
		return false;

	return true;
	return search_kernel_soft_mask_table(regs->nip);
}

static inline void srr_regs_clobbered(void)
+7 −0
Original line number Diff line number Diff line
@@ -762,6 +762,13 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
	stringify_in_c(.long (_target) - . ;)	\
	stringify_in_c(.previous)

/*
 * SOFT_MASK_TABLE(_start, _end): emit one 16-byte entry (two 8-byte
 * addresses) into the __soft_mask_table section, marking kernel text in
 * [_start, _end) as an implicitly soft-masked region.  Entries are
 * consumed by SEARCH_SOFT_MASK_TABLE, which walks the table in 16-byte
 * steps and treats _end as exclusive.
 */
#define SOFT_MASK_TABLE(_start, _end)		\
	stringify_in_c(.section __soft_mask_table,"a";)\
	stringify_in_c(.balign 8;)		\
	stringify_in_c(.llong (_start);)	\
	stringify_in_c(.llong (_end);)		\
	stringify_in_c(.previous)

#define RESTART_TABLE(_start, _end, _target)	\
	stringify_in_c(.section __restart_table,"a";)\
	stringify_in_c(.balign 8;)		\
+54 −10
Original line number Diff line number Diff line
@@ -428,21 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	2f
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked is implicitly
		 * soft-masked
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		blt-	1f
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
		lbz	r10,PACAIRQSOFTMASK(r13)
1:		andi.	r10,r10,IMASK
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
@@ -473,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
2:	mr	r10,r1			/* Save r1			*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
@@ -624,6 +634,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
303:
.endm

/*
 * SEARCH_SOFT_MASK_TABLE: linear search of __soft_mask_table for an
 * entry covering the interrupted address in r11.  Sets r12 = 1 if r11
 * lies within some [start, end) entry, r12 = 0 otherwise.
 * Clobbers r9, r10, r12 and the CR compare field (the caller saves and
 * restores r12/CR around this — see the call site's mtctr/stw sequence).
 */
.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	/* LOAD_REG_ADDR needs the kernel TOC; stash the caller's r2 in r12 */
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10		/* reached the end of the table? */
	beq	302f
	ld	r12,0(r9)	/* entry start address */
	cmpld	r11,r12
	blt	301f		/* r11 below start: try next entry */
	ld	r12,8(r9)	/* entry end address (exclusive) */
	cmpld	r11,r12
	bge	301f		/* r11 at/above end: try next entry */
	li	r12,1		/* match: r11 is in a soft-masked region */
	b	303f
301:
	addi	r9,r9,16	/* advance to the next 16-byte entry */
	b	300b
302:
	li	r12,0		/* no match */
303:
.endm

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
@@ -754,8 +794,8 @@ __start_interrupts:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses below __end_soft_masked as being
 * soft-masked.
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
@@ -772,6 +812,7 @@ __start_interrupts:
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
1:
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
@@ -801,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	b	system_call_vectored_sigill
#endif
	.endr
2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common)
+8 −0
Original line number Diff line number Diff line
@@ -207,7 +207,9 @@ syscall_vectored_\name\()_restart:
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_vectored_\name\()_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)

.endm
@@ -410,7 +412,9 @@ syscall_restart:
	bl	syscall_exit_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Lsyscall_rst_start
1:

SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif

@@ -607,7 +611,9 @@ interrupt_return_\srr\()_user_restart:
	bl	interrupt_exit_user_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_user_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif

@@ -738,7 +744,9 @@ interrupt_return_\srr\()_kernel_restart:
	bl	interrupt_exit_kernel_restart
	std	r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
	b	.Linterrupt_return_\srr\()_kernel_rst_start
1:

SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif

+9 −0
Original line number Diff line number Diff line
@@ -9,6 +9,14 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN	0

/*
 * SOFT_MASK_TABLE(align): linker-script helper that collects all
 * __soft_mask_table input sections (emitted by the asm SOFT_MASK_TABLE
 * macro), aligned to 'align', and brackets them with the
 * __start/__stop symbols that SEARCH_SOFT_MASK_TABLE iterates between.
 * KEEP() retains the entries even if nothing references them directly.
 */
#define SOFT_MASK_TABLE(align)						\
	. = ALIGN(align);						\
	__soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) {	\
		__start___soft_mask_table = .;				\
		KEEP(*(__soft_mask_table))				\
		__stop___soft_mask_table = .;				\
	}

#define RESTART_TABLE(align)						\
	. = ALIGN(align);						\
	__restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) {	\
@@ -132,6 +140,7 @@ SECTIONS
	RO_DATA(PAGE_SIZE)

#ifdef CONFIG_PPC64
	SOFT_MASK_TABLE(8)
	RESTART_TABLE(8)

	. = ALIGN(8);
Loading