Commit 22b8cc3e authored by Linus Torvalds
Pull x86 LAM (Linear Address Masking) support from Dave Hansen:
 "Add support for the new Linear Address Masking CPU feature.

  This is similar to ARM's Top Byte Ignore and allows userspace to store
  metadata in some bits of pointers without masking it out before use"
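
As a quick illustration of the interface this series adds (see "x86/mm: Provide arch_prctl() interface for LAM" in the shortlog below), here is a minimal userspace sketch. It assumes a LAM-capable CPU and a kernel built with the new CONFIG_ADDRESS_MASKING option; the ARCH_* constants are taken from <asm/prctl.h> in this tree, and the expected mask value is an assumption based on the LAM_U57 layout (bits 62:57 ignored, bit 63 preserved):

#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

#define ARCH_GET_UNTAG_MASK	0x4001
#define ARCH_ENABLE_TAGGED_ADDR	0x4002

int main(void)
{
	unsigned long mask;

	/* Ask for 6 tag bits; the kernel grants LAM_U57 (bits 62:57). */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6)) {
		perror("ARCH_ENABLE_TAGGED_ADDR");
		return 1;
	}

	syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &mask);
	printf("untag mask: %#lx\n", mask);	/* expect 0x81ffffffffffffff */

	/* Store metadata in the ignored bits; dereference still works. */
	int *p = malloc(sizeof(*p));
	int *tagged = (int *)((unsigned long)p | (0x2aUL << 57));

	*tagged = 42;
	printf("*p = %d\n", *p);
	return 0;
}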

* tag 'x86_mm_for_6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/iommu/sva: Do not allow to set FORCE_TAGGED_SVA bit from outside
  x86/mm/iommu/sva: Fix error code for LAM enabling failure due to SVA
  selftests/x86/lam: Add test cases for LAM vs thread creation
  selftests/x86/lam: Add ARCH_FORCE_TAGGED_SVA test cases for linear-address masking
  selftests/x86/lam: Add inherit test cases for linear-address masking
  selftests/x86/lam: Add io_uring test cases for linear-address masking
  selftests/x86/lam: Add mmap and SYSCALL test cases for linear-address masking
  selftests/x86/lam: Add malloc and tag-bits test cases for linear-address masking
  x86/mm/iommu/sva: Make LAM and SVA mutually exclusive
  iommu/sva: Replace pasid_valid() helper with mm_valid_pasid()
  mm: Expose untagging mask in /proc/$PID/status
  x86/mm: Provide arch_prctl() interface for LAM
  x86/mm: Reduce untagged_addr() overhead for systems without LAM
  x86/uaccess: Provide untagged_addr() and remove tags before address check
  mm: Introduce untagged_addr_remote()
  x86/mm: Handle LAM on context switch
  x86: CPUID and CR3/CR4 flags for Linear Address Masking
  x86: Allow atomic MM_CONTEXT flags setting
  x86/mm: Rework address range check in get_user() and put_user()
parents 7b664cc3 97740266
arch/arm64/include/asm/mmu_context.h (+6 −0)
@@ -288,6 +288,12 @@ void post_ttbr_update_workaround(void);
 unsigned long arm64_mm_context_get(struct mm_struct *mm);
 void arm64_mm_context_put(struct mm_struct *mm);
 
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> 8;
+}
+
 #include <asm-generic/mmu_context.h>
 
 #endif /* !__ASSEMBLY__ */
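
For reference, the arm64 implementation above evaluates to -1UL >> 8 == 0x00ffffffffffffff: the whole top byte is ignored, matching Top Byte Ignore. A standalone sketch of applying that mask (illustration only, not kernel code; the tagged address is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t untag_mask = ~UINT64_C(0) >> 8;	/* what mm_untag_mask() returns */
	uint64_t tagged = 0xab007fffdeadbeefUL;		/* hypothetical TBI-tagged VA */

	assert(untag_mask == 0x00ffffffffffffffUL);
	assert((tagged & untag_mask) == 0x00007fffdeadbeefUL);
	return 0;
}
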
arch/sparc/include/asm/mmu_context_64.h (+6 −0)
@@ -185,6 +185,12 @@ static inline void finish_arch_post_lock_switch(void)
 	}
 }
 
+#define mm_untag_mask mm_untag_mask
+static inline unsigned long mm_untag_mask(struct mm_struct *mm)
+{
+	return -1UL >> adi_nbits();
+}
+
 #include <asm-generic/mmu_context.h>
 
 #endif /* !(__ASSEMBLY__) */
arch/sparc/include/asm/uaccess_64.h (+2 −0)
@@ -8,8 +8,10 @@
 
 #include <linux/compiler.h>
 #include <linux/string.h>
+#include <linux/mm_types.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
+#include <asm/pgtable.h>
 
 #include <asm/processor.h>
 #include <asm-generic/access_ok.h>
arch/x86/Kconfig (+11 −0)
@@ -2290,6 +2290,17 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
 
 	  If unsure, leave at the default value.
 
+config ADDRESS_MASKING
+	bool "Linear Address Masking support"
+	depends on X86_64
+	help
+	  Linear Address Masking (LAM) modifies the checking that is applied
+	  to 64-bit linear addresses, allowing software to use the
+	  untranslated address bits for metadata.
+
+	  The capability can be used for efficient address sanitizer (ASAN)
+	  implementations and for optimizations in JITs.
+
 config HOTPLUG_CPU
 	def_bool y
 	depends on SMP
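
The ASAN/JIT use case mentioned in the help text reduces to tag arithmetic like the following sketch. The mask value is an assumption derived from the LAM_U57 layout (bits 62:57 carry metadata; bit 63 still selects the kernel vs user half of the address space):

#include <assert.h>
#include <stdint.h>

#define LAM_U57_BITS	6
/* Bits 62:57 are ignored by translation; bit 63 remains significant. */
#define LAM_U57_MASK	(~(((UINT64_C(1) << LAM_U57_BITS) - 1) << 57))

int main(void)
{
	uint64_t ptr = 0x00007f1234567000UL;	/* hypothetical allocation */
	uint64_t tagged = ptr | (0x15UL << 57);	/* 6-bit tag = 0x15 */

	assert(LAM_U57_MASK == 0x81ffffffffffffffUL);
	assert((tagged & LAM_U57_MASK) == ptr);	/* software untagging */
	return 0;
}
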
arch/x86/entry/vsyscall/vsyscall_64.c (+1 −1)
@@ -317,7 +317,7 @@ static struct vm_area_struct gate_vma __ro_after_init = {
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_COMPAT
-	if (!mm || !(mm->context.flags & MM_CONTEXT_HAS_VSYSCALL))
+	if (!mm || !test_bit(MM_CONTEXT_HAS_VSYSCALL, &mm->context.flags))
 		return NULL;
 #endif
 	if (vsyscall_mode == NONE)
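
The hunk above is fallout from "x86: Allow atomic MM_CONTEXT flags setting": once LAM can be enabled on a live mm that other threads may inspect concurrently, the context flags become an atomic bitmap and are read with test_bit() rather than a plain mask. The sketch below mimics that distinction in userspace with GCC atomic builtins; the sketch_* names are hypothetical stand-ins, not the kernel helpers:

#include <stdbool.h>

static unsigned long flags;

/* Analogous to set_bit(): an atomic read-modify-write, so two threads
 * setting different bits cannot lose each other's update the way a
 * plain "flags |= 1UL << nr" can. */
static void sketch_set_bit(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, 1UL << nr, __ATOMIC_RELAXED);
}

/* Analogous to test_bit(): an atomic load plus a mask. */
static bool sketch_test_bit(int nr, const unsigned long *addr)
{
	return __atomic_load_n(addr, __ATOMIC_RELAXED) & (1UL << nr);
}

int main(void)
{
	sketch_set_bit(0, &flags);
	return sketch_test_bit(0, &flags) ? 0 : 1;
}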