Commit 4a169d61 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

arm64: implement the new page table range API

Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().  Change
the PG_dcache_clean flag from being per-page to per-folio.

Link: https://lkml.kernel.org/r/20230802151406.3735276-11-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8b5989f3
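
For context before the diffs: set_ptes() installs PTEs for nr consecutive pages in one call, where callers previously looped over set_pte_at(). The sketch below shows how a hypothetical caller might map an entire folio. folio_nr_pages(), folio_page() and mk_pte() are existing kernel helpers, but this function and its use here are assumptions for illustration, not part of this commit.

/* Hypothetical caller (illustrative only): map every page of a folio
 * with a single set_ptes() call instead of a set_pte_at() loop.
 */
static void map_whole_folio(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep, struct folio *folio)
{
	unsigned int nr = folio_nr_pages(folio);
	pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

	/* arm64's set_ptes() steps the encoded physical address by
	 * PAGE_SIZE for each successive PTE, so one call covers all
	 * nr pages. */
	set_ptes(mm, addr, ptep, pte, nr);
	update_mmu_cache_range(NULL, vma, addr, ptep, nr);
}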
arch/arm64/include/asm/cacheflush.h +3 −1
@@ -114,7 +114,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 #define copy_to_user_page copy_to_user_page
 
 /*
- * flush_dcache_page is used when the kernel has written to the page
+ * flush_dcache_folio is used when the kernel has written to the page
  * cache page at virtual address page->virtual.
  *
  * If this page isn't mapped (ie, page_mapping == NULL), or it might
@@ -127,6 +127,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *);
+#define flush_dcache_folio flush_dcache_folio
 
 static __always_inline void icache_inval_all_pou(void)
 {
arch/arm64/include/asm/pgtable.h +19 −7
@@ -345,12 +345,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	set_pte(ptep, pte);
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	page_table_check_ptes_set(mm, ptep, pte, 1);
-	return __set_pte_at(mm, addr, ptep, pte);
+	page_table_check_ptes_set(mm, ptep, pte, nr);
+
+	for (;;) {
+		__set_pte_at(mm, addr, ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		addr += PAGE_SIZE;
+		pte_val(pte) += PAGE_SIZE;
+	}
 }
+#define set_ptes set_ptes
 
 /*
  * Huge pte definitions.
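
The open-coded loop works because of how a PTE encodes its target: the physical page-frame bits sit at and above PAGE_SHIFT, with the attribute bits packed below them, so pte_val(pte) += PAGE_SIZE advances the mapping to the next physical page without disturbing the attributes. A standalone user-space model of that stepping follows; the layout and masks are illustrative only, not the real arm64 descriptor format.

/* Toy model of the pte_val(pte) += PAGE_SIZE stepping used above. */
#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SIZE	4096ULL
#define TOY_ATTR_MASK	(TOY_PAGE_SIZE - 1)	/* toy: attributes below bit 12 */

int main(void)
{
	uint64_t pte = 0x40000000ULL | 0x3ULL;	/* frame | toy attribute bits */

	for (unsigned int i = 0; i < 4; i++) {
		assert((pte & ~TOY_ATTR_MASK) == 0x40000000ULL + i * TOY_PAGE_SIZE);
		assert((pte & TOY_ATTR_MASK) == 0x3ULL);	/* attrs preserved */
		pte += TOY_PAGE_SIZE;	/* same step as pte_val(pte) += PAGE_SIZE */
	}
	return 0;
}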
@@ -1049,8 +1058,9 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+		unsigned int nr)
 {
 	/*
 	 * We don't do anything here, so there's a very small chance of
@@ -1059,6 +1069,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 	 */
 }
 
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #ifdef CONFIG_ARM64_PA_BITS_52
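
update_mmu_cache_range() stays empty on arm64 (as the comment above notes, coherency is handled in set_pte_at()), and the old single-page name survives as a macro that forwards nr == 1 with a NULL struct vm_fault, so legacy call sites compile unchanged. A standalone sketch of that wrapper pattern, with stand-in names rather than kernel interfaces:

/* Sketch of the compatibility-macro pattern: one ranged implementation,
 * with the old single-page name kept as a defaulting macro.
 */
#include <stdio.h>

struct fault_ctx;	/* opaque, may be NULL, like struct vm_fault above */

static void demo_update_range(struct fault_ctx *ctx, unsigned long addr,
			      unsigned int nr)
{
	(void)ctx;	/* unused in this sketch */
	printf("update %u page(s) at %#lx\n", nr, addr);
}

/* Legacy single-page entry point, preserved as a macro wrapper. */
#define demo_update(addr)	demo_update_range(NULL, addr, 1)

int main(void)
{
	demo_update(0x1000UL);			/* old-style call site */
	demo_update_range(NULL, 0x2000UL, 8);	/* new batched call site */
	return 0;
}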
arch/arm64/mm/flush.c +14 −22
@@ -51,20 +51,13 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __sync_icache_dcache(pte_t pte)
 {
-	struct page *page = pte_page(pte);
+	struct folio *folio = page_folio(pte_page(pte));
 
-	/*
-	 * HugeTLB pages are always fully mapped, so only setting head page's
-	 * PG_dcache_clean flag is enough.
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
-
-	if (!test_bit(PG_dcache_clean, &page->flags)) {
-		sync_icache_aliases((unsigned long)page_address(page),
-				    (unsigned long)page_address(page) +
-					    page_size(page));
-		set_bit(PG_dcache_clean, &page->flags);
+	if (!test_bit(PG_dcache_clean, &folio->flags)) {
+		sync_icache_aliases((unsigned long)folio_address(folio),
+				    (unsigned long)folio_address(folio) +
+					    folio_size(folio));
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
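
The hunk above is a lazy, flag-gated synchronisation: the expensive icache/dcache maintenance in sync_icache_aliases() runs at most once per folio, and PG_dcache_clean then short-circuits later calls until flush_dcache_folio() clears the flag again. A toy user-space model of that pattern follows; every name in it is an illustrative stand-in.

/* Toy model of the PG_dcache_clean gating in __sync_icache_dcache(). */
#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	bool dcache_clean;	/* stands in for PG_dcache_clean */
};

static void expensive_sync(struct toy_folio *folio)
{
	printf("syncing icache/dcache for folio %p\n", (void *)folio);
}

static void sync_if_needed(struct toy_folio *folio)
{
	if (!folio->dcache_clean) {	/* mirrors !test_bit(...) */
		expensive_sync(folio);
		folio->dcache_clean = true;	/* mirrors set_bit(...) */
	}
}

static void kernel_wrote_to(struct toy_folio *folio)
{
	folio->dcache_clean = false;	/* mirrors flush_dcache_folio() */
}

int main(void)
{
	struct toy_folio f = { .dcache_clean = false };

	sync_if_needed(&f);	/* does the work */
	sync_if_needed(&f);	/* skipped: flag already set */
	kernel_wrote_to(&f);	/* kernel dirtied the folio */
	sync_if_needed(&f);	/* syncs again */
	return 0;
}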
@@ -74,17 +67,16 @@ EXPORT_SYMBOL_GPL(__sync_icache_dcache);
  * it as dirty for later flushing when mapped in user space (if executable,
  * see __sync_icache_dcache).
  */
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
-	/*
-	 * HugeTLB pages are always fully mapped and only head page will be
-	 * set PG_dcache_clean (see comments in __sync_icache_dcache()).
-	 */
-	if (PageHuge(page))
-		page = compound_head(page);
+	if (test_bit(PG_dcache_clean, &folio->flags))
+		clear_bit(PG_dcache_clean, &folio->flags);
+}
+EXPORT_SYMBOL(flush_dcache_folio);
 
-	if (test_bit(PG_dcache_clean, &page->flags))
-		clear_bit(PG_dcache_clean, &page->flags);
+void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
 }
 EXPORT_SYMBOL(flush_dcache_page);
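
Design note: the per-folio PG_dcache_clean flag generalises what the removed PageHuge()/compound_head() special cases already did for HugeTLB (track cache state once per compound page) to folios of all sizes, which is why both special cases disappear and flush_dcache_page() reduces to a one-line page_folio() wrapper.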