Commit e10cd4b0 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Andrew Morton
Browse files

x86/mm: enable ARCH_HAS_VM_GET_PAGE_PROT

This defines and exports a platform specific custom vm_get_page_prot() via
subscribing ARCH_HAS_VM_GET_PAGE_PROT.  This also unsubscribes from config
ARCH_HAS_FILTER_PGPROT, after dropping off arch_filter_pgprot() and
arch_vm_get_page_prot().

Link: https://lkml.kernel.org/r/20220414062125.609297-6-anshuman.khandual@arm.com


Signed-off-by: default avatarChristoph Hellwig <hch@infradead.org>
Signed-off-by: default avatarAnshuman Khandual <anshuman.khandual@arm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David S. Miller <davem@davemloft.net>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent 91d4ce98
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -76,7 +76,6 @@ config X86
	select ARCH_HAS_EARLY_DEBUG		if KGDB
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FAST_MULTIPLIER
	select ARCH_HAS_FILTER_PGPROT
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_KCOV			if X86_64
@@ -95,6 +94,7 @@ config X86
	select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE
	select ARCH_HAS_SYSCALL_WRAPPER
	select ARCH_HAS_UBSAN_SANITIZE_ALL
	select ARCH_HAS_VM_GET_PAGE_PROT
	select ARCH_HAS_DEBUG_WX
	select ARCH_HAS_ZONE_DMA_SET if EXPERT
	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+0 −5
Original line number Diff line number Diff line
@@ -649,11 +649,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
	return canon_pgprot(prot);
}

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
+0 −14
Original line number Diff line number Diff line
@@ -5,20 +5,6 @@
#define MAP_32BIT	0x40		/* only give out 32bit addresses */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
/*
 * Take the 4 protection key bits out of the vma->vm_flags
 * value and turn them in to the bits that we can put in
 * to a pte.
 *
 * Only override these if Protection Keys are available
 * (which is only on 64-bit).
 */
#define arch_vm_get_page_prot(vm_flags)	__pgprot(	\
		((vm_flags) & VM_PKEY_BIT0 ? _PAGE_PKEY_BIT0 : 0) |	\
		((vm_flags) & VM_PKEY_BIT1 ? _PAGE_PKEY_BIT1 : 0) |	\
		((vm_flags) & VM_PKEY_BIT2 ? _PAGE_PKEY_BIT2 : 0) |	\
		((vm_flags) & VM_PKEY_BIT3 ? _PAGE_PKEY_BIT3 : 0))

#define arch_calc_vm_prot_bits(prot, key) (		\
		((key) & 0x1 ? VM_PKEY_BIT0 : 0) |      \
		((key) & 0x2 ? VM_PKEY_BIT1 : 0) |      \
+1 −1
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg
endif

obj-y				:=  init.o init_$(BITS).o fault.o ioremap.o extable.o mmap.o \
				    pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o
				    pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o pgprot.o

obj-y				+= pat/

arch/x86/mm/pgprot.c

0 → 100644
+35 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Build the page protection value for a VMA from its vm_flags.
 *
 * Starts from the generic protection_map entry selected by the
 * read/write/exec/shared bits, then layers on x86-specific state:
 * protection-key bits (when configured), SME encryption mask, and
 * the supported-PTE-bit filter for present mappings.
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	unsigned long prot;

	/* Base protections come from the arch-independent table. */
	prot = pgprot_val(protection_map[vm_flags &
			  (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/*
	 * Fold the 4 protection key bits out of vm_flags into the
	 * bits we can put in a pte.  Protection keys are only
	 * available on 64-bit, hence the config guard.
	 */
	prot |= (vm_flags & VM_PKEY_BIT0) ? _PAGE_PKEY_BIT0 : 0;
	prot |= (vm_flags & VM_PKEY_BIT1) ? _PAGE_PKEY_BIT1 : 0;
	prot |= (vm_flags & VM_PKEY_BIT2) ? _PAGE_PKEY_BIT2 : 0;
	prot |= (vm_flags & VM_PKEY_BIT3) ? _PAGE_PKEY_BIT3 : 0;
#endif

	/* Apply the memory-encryption mask, then drop unsupported bits. */
	prot = __sme_set(prot);
	if (prot & _PAGE_PRESENT)
		prot &= __supported_pte_mask;

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);