Commit 7cf283c7 authored by Mark Rutland, committed by Catalin Marinas
Browse files

arm64: uaccess: remove redundant PAN toggling



Some code (e.g. futex) needs to make privileged accesses to userspace
memory, and uses uaccess_{enable,disable}_privileged() in order to
permit this. All other uaccess primitives use LDTR/STTR, and never need
to toggle PAN.

Remove the redundant PAN toggling.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201202131558.39270-12-mark.rutland@arm.com


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent b5a5a01d
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -17,7 +17,6 @@
#define ARM64_WORKAROUND_834220			7
#define ARM64_HAS_NO_HW_PREFETCH		8
#define ARM64_HAS_UAO				9
#define ARM64_ALT_PAN_NOT_UAO			10
#define ARM64_HAS_VIRT_HOST_EXTN		11
#define ARM64_WORKAROUND_CAVIUM_27456		12
#define ARM64_HAS_32BIT_EL0			13
+19 −40
Original line number Diff line number Diff line
@@ -159,41 +159,20 @@ static inline void __uaccess_enable_hw_pan(void)
			CONFIG_ARM64_PAN));
}

/*
 * Disable privileged userspace access.
 *
 * If uaccess_ttbr0_disable() returns false (presumably meaning the
 * TTBR0-based PAN emulation is not in use — confirm against its
 * definition), set PSTATE.PAN via an ALTERNATIVE that is patched in
 * when cpucap @alt is detected and CONFIG_ARM64_PAN is enabled;
 * otherwise the instruction remains a nop.
 */
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

/*
 * Enable privileged userspace access.
 *
 * Mirror of __uaccess_disable(): if uaccess_ttbr0_enable() returns
 * false, clear PSTATE.PAN (SET_PSTATE_PAN(0)) via an ALTERNATIVE
 * patched in when cpucap @alt is present and CONFIG_ARM64_PAN is
 * enabled; otherwise the instruction remains a nop.
 */
#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

/*
 * Re-disable privileged access to userspace after a privileged uaccess
 * sequence (e.g. futex), keyed on the ARM64_HAS_PAN capability.
 */
static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}
	if (uaccess_ttbr0_disable())
		return;

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
	__uaccess_enable_hw_pan();
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
static inline void uaccess_enable_privileged(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}
	if (uaccess_ttbr0_enable())
		return;

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
	__uaccess_disable_hw_pan();
}

/*
@@ -265,9 +244,9 @@ do { \
#define __raw_get_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", x, ptr, err);				\
	uaccess_disable_not_uao();					\
	uaccess_ttbr0_disable();					\
} while (0)

#define __get_user_error(x, ptr, err)					\
@@ -338,9 +317,9 @@ do { \
#define __raw_put_user(x, ptr, err)					\
do {									\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", x, ptr, err);				\
	uaccess_disable_not_uao();					\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
@@ -378,10 +357,10 @@ extern unsigned long __must_check __arch_copy_from_user(void *to, const void __u
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

@@ -389,10 +368,10 @@ extern unsigned long __must_check __arch_copy_to_user(void __user *to, const voi
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				    (from), (n));			\
	uaccess_disable_not_uao();					\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})

@@ -400,10 +379,10 @@ extern unsigned long __must_check __arch_copy_in_user(void __user *to, const voi
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	uaccess_ttbr0_enable();						\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				    __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	uaccess_ttbr0_disable();					\
	__aciu_ret;							\
})

@@ -414,9 +393,9 @@ extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned lo
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
		uaccess_ttbr0_disable();
	}
	return n;
}
+0 −17
Original line number Diff line number Diff line
@@ -153,10 +153,6 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
		.width = 0,				\
	}

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

static bool __system_matches_cap(unsigned int n);
@@ -1779,13 +1775,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.min_field_value = 1,
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_VHE
	{
		.desc = "Virtualization Host Extensions",
@@ -2736,12 +2725,6 @@ void __init setup_cpu_features(void)
			ARCH_DMA_MINALIGN);
}

/*
 * Matches iff the system has PAN but lacks UAO, i.e. the configuration
 * backing the ARM64_ALT_PAN_NOT_UAO meta-capability. Both operands are
 * checked via __system_matches_cap(); @entry and @__unused are ignored.
 */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
}

static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));