Commit b9bd9f60 authored by Linus Torvalds
Browse files

x86: uaccess: move 32-bit and 64-bit parts into proper <asm/uaccess_N.h> header



The x86 <asm/uaccess.h> file has grown features that are specific to
x86-64 like LAM support and the related access_ok() changes.  They
really should be in the <asm/uaccess_64.h> file and not pollute the
generic x86 header.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6ccdc91d
Loading
Loading
Loading
Loading
+3 −84
Original line number Diff line number Diff line
@@ -16,83 +16,10 @@
#include <asm/extable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out the LAM tag bits from a userspace address.
 *
 * The 'sign' trick untags userspace pointers without any branches while
 * leaving kernel addresses intact: arithmetic-shifting the address right
 * by 63 yields 0 for user pointers and -1 for kernel pointers, so OR-ing
 * the per-CPU untag mask with 'sign' produces an all-ones mask (a no-op
 * AND) for kernel addresses.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	long sign;

	/*
	 * Reference tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would
	 * be resolved against the original location and become wrong once
	 * the instructions are copied to their target place.  When
	 * X86_FEATURE_LAM is not set the alternative is empty and 'addr'
	 * is returned unchanged.
	 */
	asm (ALTERNATIVE("",
			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr), [sign] "=r" (sign)
	     : "m" (tlbstate_untag_mask), "[sign]" (addr));

	return addr;
}

/*
 * Strip LAM tag bits from @addr while preserving the argument's original
 * (possibly __user-annotated) pointer type via the __force cast dance.
 */
#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

/*
 * Untag an address belonging to a (possibly remote) process @mm, using
 * that mm's untag mask instead of the current CPU's per-CPU copy.  The
 * caller must hold the mmap lock (asserted below).
 *
 * NOTE(review): 'addr >> 63' is a shift of an *unsigned* long, so 'sign'
 * is 0 or 1 — not the 0 / -1 mask the local __untagged_addr() computes
 * with 'sar'.  Kernel addresses therefore do not appear to be preserved
 * here; confirm that remote callers only ever pass user addresses.
 */
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}

/*
 * Type-preserving wrapper around __untagged_addr_remote(); callers must
 * hold @mm's mmap lock.
 */
#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
#define untagged_addr(addr)	(addr)
#endif

#ifdef CONFIG_X86_64
/*
 * On x86-64, we may have tag bits in the user pointer. Rather than
 * mask them off, just change the rules for __access_ok().
 *
 * Make the rule be that 'ptr+size' must not overflow, and must not
 * have the high bit set. Compilers generally understand about
 * unsigned overflow and the CF bit and generate reasonable code for
 * this. Although it looks like the combination confuses at least
 * clang (and instead of just doing an "add" followed by a test of
 * SF and CF, you'll see that unnecessary comparison).
 *
 * For the common case of small sizes that can be checked at compile
 * time, don't even bother with the addition, and just check that the
 * base pointer is ok.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	/*
	 * __builtin_constant_p() is applied to the comparison result rather
	 * than to 'size' itself, so this fast path also covers non-constant
	 * sizes the compiler can prove are <= PAGE_SIZE.
	 */
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return (long)ptr >= 0;	/* high bit clear => user pointer */
	} else {
		/* 'ptr + size' must neither wrap nor set the high bit */
		unsigned long sum = size + (unsigned long)ptr;
		return (long) sum >= 0 && sum >= (unsigned long)ptr;
	}
}
#define __access_ok __access_ok
# include <asm/uaccess_64.h>
#endif

#include <asm-generic/access_ok.h>
@@ -583,14 +510,6 @@ extern struct movsl_mask {

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
+3 −0
Original line number Diff line number Diff line
@@ -33,4 +33,7 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
       return __copy_from_user_ll_nocache_nozero(to, from, n);
}

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#endif /* _ASM_X86_UACCESS_32_H */
+76 −1
Original line number Diff line number Diff line
@@ -12,6 +12,81 @@
#include <asm/cpufeatures.h>
#include <asm/page.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out the LAM tag bits from a userspace address.
 *
 * The 'sign' trick untags userspace pointers without any branches while
 * leaving kernel addresses intact: arithmetic-shifting the address right
 * by 63 yields 0 for user pointers and -1 for kernel pointers, so OR-ing
 * the per-CPU untag mask with 'sign' produces an all-ones mask (a no-op
 * AND) for kernel addresses.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	long sign;

	/*
	 * Reference tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would
	 * be resolved against the original location and become wrong once
	 * the instructions are copied to their target place.  When
	 * X86_FEATURE_LAM is not set the alternative is empty and 'addr'
	 * is returned unchanged.
	 */
	asm (ALTERNATIVE("",
			 "sar $63, %[sign]\n\t" /* user_ptr ? 0 : -1UL */
			 "or %%gs:tlbstate_untag_mask, %[sign]\n\t"
			 "and %[sign], %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr), [sign] "=r" (sign)
	     : "m" (tlbstate_untag_mask), "[sign]" (addr));

	return addr;
}

/*
 * Strip LAM tag bits from @addr while preserving the argument's original
 * (possibly __user-annotated) pointer type via the __force cast dance.
 */
#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

/*
 * Untag an address belonging to a (possibly remote) process @mm, using
 * that mm's untag mask instead of the current CPU's per-CPU copy.  The
 * caller must hold the mmap lock (asserted below).
 *
 * NOTE(review): 'addr >> 63' is a shift of an *unsigned* long, so 'sign'
 * is 0 or 1 — not the 0 / -1 mask the local __untagged_addr() computes
 * with 'sar'.  Kernel addresses therefore do not appear to be preserved
 * here; confirm that remote callers only ever pass user addresses.
 */
static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	long sign = addr >> 63;

	mmap_assert_locked(mm);
	addr &= (mm)->context.untag_mask | sign;

	return addr;
}

/*
 * Type-preserving wrapper around __untagged_addr_remote(); callers must
 * hold @mm's mmap lock.
 */
#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#endif

/*
 * On x86-64, we may have tag bits in the user pointer. Rather than
 * mask them off, just change the rules for __access_ok().
 *
 * Make the rule be that 'ptr+size' must not overflow, and must not
 * have the high bit set. Compilers generally understand about
 * unsigned overflow and the CF bit and generate reasonable code for
 * this. Although it looks like the combination confuses at least
 * clang (and instead of just doing an "add" followed by a test of
 * SF and CF, you'll see that unnecessary comparison).
 *
 * For the common case of small sizes that can be checked at compile
 * time, don't even bother with the addition, and just check that the
 * base pointer is ok.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long limit;

	/*
	 * Sizes the compiler can bound at build time: checking that the
	 * base pointer has the high bit clear is sufficient, skip the add.
	 * Note __builtin_constant_p() is applied to the comparison result,
	 * so provably-small non-constant sizes qualify too.
	 */
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE)
		return (long)addr >= 0;

	/* Otherwise 'ptr + size' must neither wrap nor set the high bit. */
	limit = addr + size;
	return (long)limit >= 0 && limit >= addr;
}
#define __access_ok __access_ok

/*
 * Copy To/From Userspace
 */
@@ -106,7 +181,7 @@ static __always_inline __must_check unsigned long __clear_user(void __user *addr

/*
 * Zero @n bytes of user memory at @to after validating the range.
 * Returns the number of bytes NOT cleared (0 on full success, @n when
 * the range fails the access check outright), propagating the
 * __must_check result of __clear_user().
 *
 * Uses __access_ok() directly: <asm-generic/access_ok.h>, which builds
 * access_ok() on top of it, is included only after this header.
 *
 * Fix: the flattened diff left both the pre-change 'access_ok()' line
 * and the post-change '__access_ok()' line as nested ifs; per the
 * one-line hunk (+181,7), only the __access_ok() check belongs.
 */
static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}