Commit bd933809 authored by Huacai Chen, committed by Hongchen Zhang
Browse files

LoongArch: Use accessors to page table entries instead of direct dereference

mainline inclusion
from mainline-v6.11-rc3
commit 4574815abf43e2bf05643e1b3f7a2e5d6df894f0
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB7Y4K
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=4574815abf43e2bf05643e1b3f7a2e5d6df894f0



--------------------------------

As very well explained in commit 20a004e7 ("arm64: mm: Use
READ_ONCE/WRITE_ONCE when accessing page tables"), an architecture whose
page table walker can modify the PTE in parallel must use READ_ONCE()/
WRITE_ONCE() macro to avoid any compiler transformation.

So apply that to LoongArch which is such an architecture, in order to
avoid potential problems.

Similar to commit edf955647269422e ("riscv: Use accessors to page table
entries instead of direct dereference").

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent fcb012bc
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t clear;
	pte_t pte = *ptep;
	pte_t pte = ptep_get(ptep);

	pte_val(clear) = (unsigned long)invalid_pte_table;
	set_pte_at(mm, addr, ptep, clear);
@@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     pte_t *ptep, pte_t pte,
					     int dirty)
{
	int changed = !pte_same(*ptep, pte);
	int changed = !pte_same(ptep_get(ptep), pte);

	if (changed) {
		set_pte_at(vma->vm_mm, addr, ptep, pte);
+3 −3
Original line number Diff line number Diff line
@@ -43,13 +43,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *pte = virt_to_kpte(addr);

	if (WARN_ON(!pte) || pte_none(*pte))
	if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
		return false;

	if (protect)
		set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
		set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
	else
		set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
		set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));

	preempt_disable();
	local_flush_tlb_one(addr);
+29 −19
Original line number Diff line number Diff line
@@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define KFENCE_AREA_START	(VMEMMAP_END + 1)
#define KFENCE_AREA_END		(KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)

#define ptep_get(ptep) READ_ONCE(*(ptep))
#define pmdp_get(pmdp) READ_ONCE(*(pmdp))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
@@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
	WRITE_ONCE(*p4d, p4dval);
}

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
@@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
static inline pmd_t *pud_pgtable(pud_t pud)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
	return (pmd_t *)pud_val(pud);
}

static inline pmd_t *pud_pgtable(pud_t pud)
static inline void set_pud(pud_t *pud, pud_t pudval)
{
	return (pmd_t *)pud_val(pud);
	WRITE_ONCE(*pud, pudval);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
}

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
	WRITE_ONCE(*pmd, pmdval);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
}

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

@@ -314,7 +323,8 @@ extern void paging_init(void);

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	WRITE_ONCE(*ptep, pteval);

	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
@@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
		if (pte_none(ptep_get(buddy)))
			WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
#endif /* CONFIG_SMP */
	}
}
@@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
	if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
		set_pte(ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte(ptep, __pte(0));
@@ -591,7 +601,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;
	pmd_t old = pmdp_get(pmdp);

	pmd_clear(pmdp);

+4 −4
Original line number Diff line number Diff line
@@ -727,19 +727,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
	 * value) and then p*d_offset() walks into the target huge page instead
	 * of the old page table (sees the new value).
	 */
	pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
	pgd = pgdp_get(pgd_offset(kvm->mm, hva));
	if (pgd_none(pgd))
		goto out;

	p4d = READ_ONCE(*p4d_offset(&pgd, hva));
	p4d = p4dp_get(p4d_offset(&pgd, hva));
	if (p4d_none(p4d) || !p4d_present(p4d))
		goto out;

	pud = READ_ONCE(*pud_offset(&p4d, hva));
	pud = pudp_get(pud_offset(&p4d, hva));
	if (pud_none(pud) || !pud_present(pud))
		goto out;

	pmd = READ_ONCE(*pmd_offset(&pud, hva));
	pmd = pmdp_get(pmd_offset(&pud, hva));
	if (pmd_none(pmd) || !pmd_present(pmd))
		goto out;

+3 −3
Original line number Diff line number Diff line
@@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
	if (pgd_present(pgdp_get(pgd))) {
		p4d = p4d_offset(pgd, addr);
		if (p4d_present(*p4d)) {
		if (p4d_present(p4dp_get(p4d))) {
			pud = pud_offset(p4d, addr);
			if (pud_present(*pud))
			if (pud_present(pudp_get(pud)))
				pmd = pmd_offset(pud, addr);
		}
	}
Loading