Commit 9fee28ba authored by Matthew Wilcox (Oracle), committed by Andrew Morton

powerpc: implement the new page table range API

Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().  Change
the PG_arch_1 (aka PG_dcache_clean) flag from being per-page to per-folio.

[willy@infradead.org: re-export flush_dcache_icache_folio()]
  Link: https://lkml.kernel.org/r/ZMx1daYwvD9EM7Cv@casper.infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-22-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e70bbca6
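
The set_ptes() addition itself lands in hunks not shown in this excerpt (arch/powerpc/mm/pgtable.c and the common pgtable.h). As a rough sketch of the pattern, assuming the existing powerpc helper __set_pte_at() and the PTE_RPN_SHIFT pfn field seen below, it boils down to a loop that installs nr consecutive entries, stepping the pfn by one page each time:

/* Sketch only: the real function also filters the PTE for d-cache
 * tracking (set_pte_filter()) and wraps the loop in lazy-MMU mode. */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	      pte_t pte, unsigned int nr)
{
	for (;;) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		/* consecutive pages: bump the pfn field by one page */
		pte = __pte(pte_val(pte) + (1UL << PTE_RPN_SHIFT));
		addr += PAGE_SIZE;
	}
}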
arch/powerpc/include/asm/book3s/32/pgtable.h (+0 −5)
@@ -462,11 +462,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 		     pgprot_val(pgprot));
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return pte_val(pte) >> PTE_RPN_SHIFT;
-}
-
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
arch/powerpc/include/asm/book3s/64/pgtable.h (+1 −5)
@@ -104,6 +104,7 @@
  * and every thing below PAGE_SHIFT;
  */
 #define PTE_RPN_MASK	(((1UL << _PAGE_PA_MAX) - 1) & (PAGE_MASK))
+#define PTE_RPN_SHIFT	PAGE_SHIFT
 /*
  * set of bits not changed in pmd_modify. Even though we have hash specific bits
  * in here, on radix we expect them to be zero.
@@ -569,11 +570,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
 }
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	return (pte_val(pte) & PTE_RPN_MASK) >> PAGE_SHIFT;
-}
-
 /* Generic modifiers for PTE bits */
 static inline pte_t pte_wrprotect(pte_t pte)
 {
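
Both per-MMU copies of pte_pfn() can go because the patch adds one common definition in arch/powerpc/include/asm/pgtable.h (a hunk not shown in this excerpt). With PTE_RPN_SHIFT now defined as PAGE_SHIFT for book3s/64, the shared helper presumably reads:

/* Common pte_pfn(): mask out the non-pfn bits, then shift down.
 * Equivalent to both removed variants for their respective layouts. */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}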
arch/powerpc/include/asm/book3s/pgtable.h (+3 −8)
@@ -9,13 +9,6 @@
 #endif
 
 #ifndef __ASSEMBLY__
-/* Insert a PTE, top-level function is out of line. It uses an inline
- * low level function in the respective pgtable-* files
- */
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-		       pte_t pte);
-
-
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 				 pte_t *ptep, pte_t entry, int dirty);
@@ -36,7 +29,9 @@ void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t
  * corresponding HPTE into the hash table ahead of time, instead of
  * waiting for the inevitable extra hash-table miss exception.
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long address,
+		pte_t *ptep, unsigned int nr)
 {
 	if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return;
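
Callers of the old single-page update_mmu_cache() do not need converting here: in the range API the one-page form is supplied as a generic wrapper, roughly:

/* Generic fallback from this series (include/linux/pgtable.h): a single
 * PTE is a range of one, with no struct vm_fault available. */
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)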
arch/powerpc/include/asm/cacheflush.h (+10 −4)
@@ -35,13 +35,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
  * It just marks the page as not i-cache clean.  We do the i-cache
  * flush later when the page is given to a user process, if necessary.
  */
-static inline void flush_dcache_page(struct page *page)
+static inline void flush_dcache_folio(struct folio *folio)
 {
 	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
 		return;
 	/* avoid an atomic op if possible */
-	if (test_bit(PG_dcache_clean, &page->flags))
-		clear_bit(PG_dcache_clean, &page->flags);
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
 }
+#define flush_dcache_folio flush_dcache_folio
+
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 void flush_icache_range(unsigned long start, unsigned long stop);
@@ -51,7 +57,7 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long addr, int len);
 #define flush_icache_user_page flush_icache_user_page
 
-void flush_dcache_icache_page(struct page *page);
+void flush_dcache_icache_folio(struct folio *folio);
 
 /**
  * flush_dcache_range(): Write any modified data cache blocks out to memory and
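
The out-of-line flush_dcache_icache_folio() declared here (the symbol the commit message notes was re-exported) is defined in arch/powerpc/mm/cacheflush.c, in a hunk not shown in this excerpt. For lowmem folios it amounts to flushing each constituent page in turn, something like:

/* Sketch of the lowmem path only; the real function also handles
 * highmem folios (kmap_local_folio() or physical-address flushes). */
void flush_dcache_icache_folio(struct folio *folio)
{
	unsigned int i, nr = folio_nr_pages(folio);
	void *addr = folio_address(folio);

	for (i = 0; i < nr; i++)
		__flush_dcache_icache(addr + i * PAGE_SIZE);
}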
arch/powerpc/include/asm/kvm_ppc.h (+5 −5)
@@ -894,7 +894,7 @@ void kvmppc_init_lpid(unsigned long nr_lpids);

 static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 {
-	struct page *page;
+	struct folio *folio;
 	/*
 	 * We can only access pages that the kernel maps
 	 * as memory. Bail out for unmapped ones.
@@ -903,10 +903,10 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
 		return;
 
 	/* Clear i-cache for new pages */
-	page = pfn_to_page(pfn);
-	if (!test_bit(PG_dcache_clean, &page->flags)) {
-		flush_dcache_icache_page(page);
-		set_bit(PG_dcache_clean, &page->flags);
+	folio = page_folio(pfn_to_page(pfn));
+	if (!test_bit(PG_dcache_clean, &folio->flags)) {
+		flush_dcache_icache_folio(folio);
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }

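One consequence of the folio conversion above: PG_dcache_clean now lives in folio->flags, i.e. on the head page, so a single flush_dcache_icache_folio() marks every page of a large folio clean, and later calls for other pfns in the same folio skip the flush entirely.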