Commit 864609c6 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

riscv: implement the new page table range API

Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().  Change
the PG_dcache_clean flag from being per-page to per-folio.

Link: https://lkml.kernel.org/r/20230802151406.3735276-23-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9fee28ba
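
For orientation, here is a minimal sketch of how a caller might use the new range API: one set_ptes()/update_mmu_cache_range() pair covers nr consecutive pages of a folio, where the old API touched a single PTE at a time. The helper map_folio_range() and its surrounding context below are assumptions for illustration only; they are not code from this commit or from generic mm.

/*
 * Hypothetical caller, illustration only: install PTEs for every page of
 * a folio with one set_ptes() call, then let the architecture refresh its
 * MMU caches for the whole range with a single hook.
 */
static void map_folio_range(struct vm_fault *vmf, struct folio *folio,
			    unsigned long addr, pte_t *ptep, pgprot_t prot)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(&folio->page, prot);

	set_ptes(vmf->vma->vm_mm, addr, ptep, pte, nr);		/* nr consecutive PTEs */
	update_mmu_cache_range(vmf, vmf->vma, addr, ptep, nr);	/* arch hook, whole range */
}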
+9 −10
@@ -15,20 +15,19 @@ static inline void local_flush_icache_all(void)
 
 #define PG_dcache_clean PG_arch_1
 
-static inline void flush_dcache_page(struct page *page)
+static inline void flush_dcache_folio(struct folio *folio)
 {
-	/*
-	 * HugeTLB pages are always fully mapped and only head page will be
-	 * set PG_dcache_clean (see comments in flush_icache_pte()).
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
-
-	if (test_bit(PG_dcache_clean, &page->flags))
-		clear_bit(PG_dcache_clean, &page->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
 }
+#define flush_dcache_folio flush_dcache_folio
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
+
 /*
  * RISC-V doesn't have an instruction to flush parts of the instruction cache,
  * so instead we just flush the whole thing.
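
Because page_folio() on any constituent page of a compound allocation resolves to the same folio, the PG_dcache_clean bit is now tracked once per folio, and the old PageHuge()/compound_head() special case is no longer needed. A rough sketch of that property; the function name is made up for illustration:

	/*
	 * Illustration only: within one large folio every page maps back to
	 * the same struct folio, so a single PG_dcache_clean bit covers all
	 * of its pages.
	 */
	static void mark_folio_dcache_dirty_example(struct page *page)
	{
		struct folio *folio = page_folio(page);		/* head for any tail page */

		clear_bit(PG_dcache_clean, &folio->flags);	/* whole folio at once    */
	}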
+24 −13
@@ -445,8 +445,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 
 /* Commit new configuration to MMU hardware */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-	unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
 	/*
 	 * The kernel assumes that TLBs don't cache invalid entries, but
@@ -455,8 +456,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
 	 * the extra traps reduce performance.  So, eagerly SFENCE.VMA.
 	 */
-	local_flush_tlb_page(address);
+	while (nr--)
+		local_flush_tlb_page(address + nr * PAGE_SIZE);
 }
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
 #define __HAVE_ARCH_UPDATE_MMU_TLB
 #define update_mmu_tlb update_mmu_cache
@@ -487,8 +491,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 
 void flush_icache_pte(pte_t pte);
 
-static inline void __set_pte_at(struct mm_struct *mm,
-	unsigned long addr, pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
 {
 	if (pte_present(pteval) && pte_exec(pteval))
 		flush_icache_pte(pteval);
@@ -496,17 +499,25 @@ static inline void __set_pte_at(struct mm_struct *mm,
 	set_pte(ptep, pteval);
 }
 
-static inline void set_pte_at(struct mm_struct *mm,
-	unsigned long addr, pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pteval, unsigned int nr)
 {
-	page_table_check_ptes_set(mm, ptep, pteval, 1);
-	__set_pte_at(mm, addr, ptep, pteval);
+	page_table_check_ptes_set(mm, ptep, pteval, nr);
+
+	for (;;) {
+		__set_pte_at(ptep, pteval);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+	}
 }
+#define set_ptes set_ptes
 
 static inline void pte_clear(struct mm_struct *mm,
 	unsigned long addr, pte_t *ptep)
 {
-	__set_pte_at(mm, addr, ptep, __pte(0));
+	__set_pte_at(ptep, __pte(0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
@@ -515,7 +526,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma,
 					pte_t entry, int dirty)
 {
 	if (!pte_same(*ptep, entry))
-		set_pte_at(vma->vm_mm, address, ptep, entry);
+		__set_pte_at(ptep, entry);
 	/*
 	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
@@ -688,14 +699,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 				pmd_t *pmdp, pmd_t pmd)
 {
 	page_table_check_pmd_set(mm, pmdp, pmd);
-	return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd));
+	return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
 }
 
 static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
 				pud_t *pudp, pud_t pud)
 {
 	page_table_check_pud_set(mm, pudp, pud);
-	return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud));
+	return __set_pte_at((pte_t *)pudp, pud_pte(pud));
 }
 
 #ifdef CONFIG_PAGE_TABLE_CHECK
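
The set_ptes() loop above advances through the range by bumping the encoded PFN directly: on RISC-V the PFN field begins at bit _PAGE_PFN_SHIFT, so adding 1 << _PAGE_PFN_SHIFT to the raw PTE value moves the mapping forward by exactly one page frame while leaving the low permission bits untouched. A small illustrative sketch of that arithmetic; the helper name is hypothetical and not part of the patch:

	/*
	 * Illustration only: stepping a PTE to the next page frame keeps the
	 * low flag bits intact because the PFN sits above them in the PTE.
	 */
	static pte_t pte_next_pfn_example(pte_t pte)
	{
		pte_val(pte) += 1 << _PAGE_PFN_SHIFT;	/* PFN field += 1         */
		return pte;				/* same flags, next frame */
	}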
+3 −10
@@ -82,18 +82,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 #ifdef CONFIG_MMU
 void flush_icache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
+	struct folio *folio = page_folio(pte_page(pte));
 
-	/*
-	 * HugeTLB pages are always fully mapped, so only setting head page's
-	 * PG_dcache_clean flag is enough.
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
-
-	if (!test_bit(PG_dcache_clean, &page->flags)) {
+	if (!test_bit(PG_dcache_clean, &folio->flags)) {
 		flush_icache_all();
-		set_bit(PG_dcache_clean, &page->flags);
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
 #endif /* CONFIG_MMU */