Commit 8e560921 authored by Aneesh Kumar K.V's avatar Aneesh Kumar K.V Committed by Michael Ellerman
Browse files

powerpc/book3s64/pkeys: Store/restore userspace AMR/IAMR correctly on entry and exit from kernel



This prepares the kernel to operate with a different value than userspace AMR/IAMR.
For this, AMR/IAMR need to be saved and restored on entry and return from the
kernel.

With KUAP we modify kernel AMR when accessing user address from the kernel
via copy_to/from_user interfaces. We don't need to modify IAMR value in
similar fashion.

If MMU_FTR_PKEY is enabled we need to save AMR/IAMR in pt_regs on entering
the kernel from userspace. If not, we can assume that AMR/IAMR is not modified
from userspace.

We need to save AMR if we have MMU_FTR_BOOK3S_KUAP feature enabled and we are
interrupted within kernel. This is required so that if we get interrupted
within copy_to/from_user we continue with the right AMR value.

If we have MMU_FTR_BOOK3S_KUEP enabled we need to restore IAMR on
return to userspace because the kernel will be running with a different
IAMR value.

Signed-off-by: default avatarAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Reviewed-by: default avatarSandipan Das <sandipan@linux.ibm.com>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201127044424.40686-11-aneesh.kumar@linux.ibm.com
parent d7df77e8
Loading
Loading
Loading
Loading
+183 −39
Original line number Diff line number Diff line
@@ -13,17 +13,46 @@

#ifdef __ASSEMBLY__

.macro kuap_restore_amr	gpr1, gpr2
#ifdef CONFIG_PPC_KUAP
.macro kuap_user_restore gpr1
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)
	isync
	mtspr	SPRN_AMR, \gpr1
	/*
	 * Restore IAMR only when returning to userspace
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)
	mtspr	SPRN_IAMR, \gpr1

	/* No isync required, see kuap_user_restore() */
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_PKEY, 67)
#endif
.endm

.macro kuap_kernel_restore	gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel. Compare and do a mtspr.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	998f
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/* No isync required, see kuap_restore_amr() */
998:
	/*
	 * No isync required, see kuap_restore_amr()
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
@@ -42,23 +71,98 @@
.endm
#endif

/*
 *	if (pkey) {
 *
 *		save AMR -> stack;
 *		if (kuap) {
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *		if (from_user) {
 *			save IAMR -> stack;
 *			if (kuep) {
 *				KUEP_BLOCKED ->IAMR
 *			}
 *		}
 *		return;
 *	}
 *
 *	if (kuap) {
 *		if (from_kernel) {
 *			save AMR -> stack;
 *			if (AMR != BLOCKED)
 *				KUAP_BLOCKED -> AMR;
 *		}
 *
 *	}
 */
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
#if defined(CONFIG_PPC_PKEY)

	/*
	 * if both pkey and kuap is disabled, nothing to do
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * if pkey is disabled and we are entering from userspace
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	/*
	 * Without pkey we are not changing AMR outside the kernel
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
        END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * pkey is enabled or pkey is disabled but entering from kernel
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT

	/*
	 * update kernel AMR with AMR_KUAP_BLOCKED only
	 * if KUAP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f
	// We don't isync here because we very recently entered via rfid
	beq	\use_cr, 102f
	/*
	 * We don't isync here because we very recently entered via an interrupt
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * if entering from kernel we don't need save IAMR
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * update kernel IAMR with AMR_KUEP_BLOCKED only
	 * if KUEP feature is enabled
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100: // skip_save_amr
#endif
.endm

@@ -68,23 +172,43 @@

DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_KUAP
#ifdef CONFIG_PPC_PKEY

#include <asm/mmu.h>
#include <asm/ptrace.h>

static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
static inline void kuap_user_restore(struct pt_regs *regs)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) && unlikely(regs->kuap != amr)) {
	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	isync();
	mtspr(SPRN_AMR, regs->amr);
	mtspr(SPRN_IAMR, regs->iamr);
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}
static inline void kuap_kernel_restore(struct pt_regs *regs,
					   unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		if (unlikely(regs->amr != amr)) {
			isync();
		mtspr(SPRN_AMR, regs->kuap);
			mtspr(SPRN_AMR, regs->amr);
			/*
		 * No isync required here because we are about to RFI back to
		 * previous context before any user accesses would be made,
		 * which is a CSI.
			 * No isync required here because we are about to rfi
			 * back to previous context before any user accesses
			 * would be made, which is a CSI.
			 */
		}
	}
	/*
	 * No need to restore IAMR when returning to kernel space.
	 */
}

static inline unsigned long kuap_get_and_check_amr(void)
{
@@ -97,6 +221,26 @@ static inline unsigned long kuap_get_and_check_amr(void)
	return 0;
}

#else /* CONFIG_PPC_PKEY */

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}

#endif /* CONFIG_PPC_PKEY */


#ifdef CONFIG_PPC_KUAP

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
@@ -145,21 +289,6 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0UL;
}

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }
#endif /* !CONFIG_PPC_KUAP */

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
@@ -176,6 +305,21 @@ static __always_inline void allow_user_access(void __user *to, const void __user
		BUILD_BUG();
}

#else /* CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* !CONFIG_PPC_KUAP */

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
+4 −1
Original line number Diff line number Diff line
@@ -61,8 +61,11 @@ struct pt_regs
				unsigned long amr;
#endif
			};
#ifdef CONFIG_PPC_PKEY
			unsigned long iamr;
#endif
		};
		unsigned long __pad[2];	/* Maintain 16 byte interrupt stack alignment */
		unsigned long __pad[4];	/* Maintain 16 byte interrupt stack alignment */
	};
};
#endif
+2 −0
Original line number Diff line number Diff line
@@ -358,11 +358,13 @@ int main(void)

#ifdef CONFIG_PPC_PKEY
	STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
	STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
#ifdef CONFIG_PPC_KUAP
	STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
#endif


#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
+4 −2
Original line number Diff line number Diff line
@@ -653,8 +653,8 @@ _ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	kuap_check_amr r3, r4
	ld	r5,_MSR(r1)
	andi.	r0,r5,MSR_PR
	bne	.Lfast_user_interrupt_return
	kuap_restore_amr r3, r4
	bne	.Lfast_user_interrupt_return_amr
	kuap_kernel_restore r3, r4
	andi.	r0,r5,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
@@ -674,6 +674,8 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs

.Lfast_user_interrupt_return_amr:
	kuap_user_restore r3
.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
+2 −2
Original line number Diff line number Diff line
@@ -1059,7 +1059,7 @@ EXC_COMMON_BEGIN(system_reset_common)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	kuap_restore_amr r9, r10
	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL

@@ -2875,7 +2875,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
	ld	r10,SOFTE(r1)
	stb	r10,PACAIRQSOFTMASK(r13)

	kuap_restore_amr r9, r10
	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS hsrr=0
	RFI_TO_KERNEL

Loading