Commit c41014c0 authored by Ma Wupeng's avatar Ma Wupeng Committed by Wupeng Ma
Browse files

arm64: mm: Update kernel pte entries if pbha bit0 enabled

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7ZC0H



--------------------------------

Update kernel pte entries if pbha bit0 is enabled. This can be used as a hint
for kernel page table entries by the MMU.

Signed-off-by: default avatarMa Wupeng <mawupeng1@huawei.com>
parent 0d65528c
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -147,6 +147,7 @@
#define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
#define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
#define PTE_PBHA_MASK		(_AT(pteval_t, 0xf) << 59)	/* Page Base Hardware Attributes */
#define PTE_PBHA0		(_AT(pteval_t, 1) << 59)	/* PBHA 59 bit */

#define PTE_ADDR_LOW		(((_AT(pteval_t, 1) << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
#ifdef CONFIG_ARM64_PA_BITS_52
+13 −1
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pbha.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
@@ -126,6 +127,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

#ifdef CONFIG_ARM64_PBHA
	mask |= PTE_PBHA0;
#endif

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;
@@ -372,6 +377,8 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	prot = pgprot_pbha_bit0(prot);

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);
@@ -1152,6 +1159,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
{
	unsigned long addr = start;
	unsigned long next;
	pgprot_t prot;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
@@ -1180,7 +1188,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
			prot = __pgprot(PROT_SECT_NORMAL);
			prot = pgprot_pbha_bit0(prot);

			pmd_set_huge(pmdp, __pa(p), prot);
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);
@@ -1300,6 +1311,7 @@ void __set_fixmap(enum fixed_addresses idx,
	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		flags = pgprot_pbha_bit0(flags);
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
+12 −1
Original line number Diff line number Diff line
@@ -5,8 +5,10 @@
#ifndef __LINUX_PBHA_H
#define __LINUX_PBHA_H

#include <linux/efi.h>
#include <linux/libfdt.h>
#include <linux/pgtable.h>

#define PBHA_VAL_BIT0 1UL

#define EFI_OEMCONFIG_VARIABLE_GUID                                            \
	EFI_GUID(0x21f3b3c5, 0x946d, 0x41c1, 0x83, 0x8c, 0x19, 0x4e, 0x48,     \
@@ -23,8 +25,17 @@ static inline bool system_support_pbha_bit0(void)
{
	return pbha_bit0_enabled;
}

/*
 * Apply the PBHA bit0 hardware hint to @prot when the platform has
 * enabled PBHA bit0; otherwise return @prot unchanged.
 */
static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot)
{
	if (!system_support_pbha_bit0())
		return prot;

	return pgprot_pbha(prot, PBHA_VAL_BIT0);
}
#else
/* CONFIG_ARM64_PBHA disabled: report no support and leave @prot untouched. */
static inline bool system_support_pbha_bit0(void) { return false; }
static inline pgprot_t pgprot_pbha_bit0(pgprot_t prot) { return prot; }
#endif

#endif
+5 −0
Original line number Diff line number Diff line
@@ -38,6 +38,7 @@
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
#include <linux/share_pool.h>
#include <linux/pbha.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -307,6 +308,8 @@ int vmap_range(unsigned long addr, unsigned long end,
{
	int err;

	prot = pgprot_pbha_bit0(prot);

	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
	flush_cache_vmap(addr, end);

@@ -549,6 +552,8 @@ static int vmap_pages_range_noflush(unsigned long addr, unsigned long end,

	WARN_ON(page_shift < PAGE_SHIFT);

	prot = pgprot_pbha_bit0(prot);

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
			page_shift == PAGE_SHIFT)
		return vmap_small_pages_range_noflush(addr, end, prot, pages);