Commit 1b03e71f authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman
Browse files

powerpc/32s: Handle PROTFAULT in hash_page() also for CONFIG_PPC_KUAP



On hash 32 bits, handling minor protection faults like unsetting
dirty flag is heavy if done from the normal page_fault processing,
because it implies hash table software lookup for flushing the entry
and then a DSI is taken anyway to add the entry back.

When KUAP was implemented, as explained in commit a68c31fc
("powerpc/32s: Implement Kernel Userspace Access Protection"),
protection faults have been diverted from hash_page() because
hash_page() was not able to identify a KUAP fault.

Implement KUAP verification in hash_page(), by clearing write
permission when the access is a kernel access and Ks is 1.
This works regardless of the address because kernel segments always
have Ks set to 0, while user segments have Ks set to 0 only
when kernel writes to userspace are granted.

Then protection faults can be handled by hash_page() even for KUAP.

Signed-off-by: default avatarChristophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/8a4ffe4798e9ea32aaaccdf85e411bb1beed3500.1605542955.git.christophe.leroy@csgroup.eu
parent 44e9754d
Loading
Loading
Loading
Loading
+0 −8
Original line number Diff line number Diff line
@@ -292,11 +292,7 @@ BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
@@ -313,11 +309,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
+11 −2
Original line number Diff line number Diff line
@@ -89,8 +89,6 @@ _GLOBAL(hash_page)
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
@@ -106,7 +104,18 @@ _GLOBAL(hash_page)
#endif
.Lretry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
#ifdef CONFIG_PPC_KUAP
	mfsrin	r5,r4
	rlwinm	r0,r9,28,_PAGE_RW	/* MSR[PR] => _PAGE_RW */
	rlwinm	r5,r5,12,_PAGE_RW	/* Ks => _PAGE_RW */
	andc	r5,r5,r0		/* Ks & ~MSR[PR] */
	andc	r5,r6,r5		/* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
	andc.	r5,r3,r5		/* check access & ~permission */
#else
	andc.	r5,r3,r6		/* check access & ~permission */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
#ifdef CONFIG_SMP
	bne-	.Lhash_page_out		/* return if access not permitted */
#else