Commit 99420941 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

nios2: implement the new page table range API

Add set_ptes(), update_mmu_cache_range(), flush_icache_pages() and
flush_dcache_folio().  Change the PG_arch_1 (aka PG_dcache_clean) flag
from being per-page to per-folio.
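
As an illustration of what the batched API buys callers, here is a minimal sketch (not part of this commit; the wrapper name map_folio_range() is invented) of establishing all of a folio's mappings with one call each to set_ptes() and update_mmu_cache_range():

	/*
	 * Hypothetical caller, for illustration only.  set_ptes() does one
	 * dcache flush covering all nr pages and then stores nr consecutive
	 * PTEs; update_mmu_cache_range() lets the arch refresh its caches
	 * and TLB once for the whole range instead of once per page.
	 */
	static void map_folio_range(struct vm_area_struct *vma, unsigned long addr,
			pte_t *ptep, struct folio *folio, pgprot_t prot)
	{
		unsigned int nr = folio_nr_pages(folio);

		set_ptes(vma->vm_mm, addr, ptep, mk_pte(&folio->page, prot), nr);
		update_mmu_cache_range(NULL, vma, addr, ptep, nr);
	}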

Link: https://lkml.kernel.org/r/20230802151406.3735276-19-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 15fa3e8e
arch/nios2/include/asm/cacheflush.h: +5 −1
@@ -29,9 +29,13 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 	unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr);
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1);
 
 #define flush_cache_vmap(start, end)		flush_dcache_range(start, end)
 #define flush_cache_vunmap(start, end)		flush_dcache_range(start, end)
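
The flush_icache_page() compatibility #define means existing nios2 call sites keep compiling unchanged; a plain statement such as

	flush_icache_page(vma, page);

now expands to flush_icache_pages(vma, page, 1);. Note that the #define itself ends in a semicolon, so the expansion gains an extra empty statement: harmless at statement level as above, though it would trip an unbraced if/else caller.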
arch/nios2/include/asm/pgtable.h: +19 −9
@@ -178,14 +178,21 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 	*ptep = pteval;
 }
 
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pteval)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
 {
-	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pteval));
-
-	flush_dcache_range(paddr, paddr + PAGE_SIZE);
-	set_pte(ptep, pteval);
+	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pte));
+
+	flush_dcache_range(paddr, paddr + nr * PAGE_SIZE);
+	for (;;) {
+		set_pte(ptep, pte);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += 1;
+	}
 }
+#define set_ptes set_ptes
 
 static inline int pmd_none(pmd_t pmd)
 {
@@ -202,7 +209,7 @@ static inline void pte_clear(struct mm_struct *mm,
 
 	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;
 
-	set_pte_at(mm, addr, ptep, null);
+	set_pte(ptep, null);
 }
 
 /*
@@ -273,7 +280,10 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 extern void __init paging_init(void);
 extern void __init mmu_init(void);
 
-extern void update_mmu_cache(struct vm_area_struct *vma,
-			     unsigned long address, pte_t *pte);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr);
+
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 
 #endif /* _ASM_NIOS2_PGTABLE_H */
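
The for (;;) loop in set_ptes() relies on the nios2 PTE layout keeping the PFN in the low-order bits of the PTE value, so pte_val(pte) += 1 advances the entry by exactly one page frame. A hand-unrolled illustration (not from the patch) for nr = 3:

	/*
	 * What set_ptes(mm, addr, ptep, pte, 3) effectively does on nios2;
	 * paddr is the kernel address of the first page, as computed in
	 * set_ptes() above.
	 */
	flush_dcache_range(paddr, paddr + 3 * PAGE_SIZE);
	set_pte(ptep, pte);		/* maps pfn     */
	pte_val(pte) += 1;		/* PFN lives in the low bits */
	set_pte(ptep + 1, pte);		/* maps pfn + 1 */
	pte_val(pte) += 1;
	set_pte(ptep + 2, pte);		/* maps pfn + 2 */

Note that pte_clear() is switched from set_pte_at() to a bare set_pte(), which sidesteps the dcache flush: the "null" value is a TLB-way marker, not a mapping of a real page, so flushing pte_page() of it would be meaningless.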
arch/nios2/mm/cacheflush.c: +43 −36
@@ -71,26 +71,26 @@ static void __flush_icache(unsigned long start, unsigned long end)
 	__asm__ __volatile(" flushp\n");
 }
 
-static void flush_aliases(struct address_space *mapping, struct page *page)
+static void flush_aliases(struct address_space *mapping, struct folio *folio)
 {
 	struct mm_struct *mm = current->active_mm;
-	struct vm_area_struct *mpnt;
+	struct vm_area_struct *vma;
 	pgoff_t pgoff;
+	unsigned long nr = folio_nr_pages(folio);
 
-	pgoff = page->index;
+	pgoff = folio->index;
 
 	flush_dcache_mmap_lock(mapping);
-	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
-		unsigned long offset;
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+		unsigned long start;
 
-		if (mpnt->vm_mm != mm)
+		if (vma->vm_mm != mm)
 			continue;
-		if (!(mpnt->vm_flags & VM_MAYSHARE))
+		if (!(vma->vm_flags & VM_MAYSHARE))
 			continue;
 
-		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		flush_cache_page(mpnt, mpnt->vm_start + offset,
-			page_to_pfn(page));
+		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
 	}
 	flush_dcache_mmap_unlock(mapping);
 }
@@ -138,10 +138,11 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		__flush_icache(start, end);
 }
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+		unsigned int nr)
 {
 	unsigned long start = (unsigned long) page_address(page);
-	unsigned long end = start + PAGE_SIZE;
+	unsigned long end = start + nr * PAGE_SIZE;
 
 	__flush_dcache(start, end);
 	__flush_icache(start, end);
@@ -158,19 +159,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 		__flush_icache(start, end);
 }
 
-void __flush_dcache_page(struct address_space *mapping, struct page *page)
+static void __flush_dcache_folio(struct folio *folio)
 {
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	unsigned long start = (unsigned long)page_address(page);
+	unsigned long start = (unsigned long)folio_address(folio);
 
-	__flush_dcache(start, start + PAGE_SIZE);
+	__flush_dcache(start, start + folio_size(folio));
 }
 
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
 {
 	struct address_space *mapping;
 
@@ -178,32 +179,38 @@ void flush_dcache_page(struct page *page)
 	 * The zero page is never written to, so never has any dirty
 	 * cache lines, and therefore never needs to be flushed.
 	 */
-	if (page == ZERO_PAGE(0))
+	if (is_zero_pfn(folio_pfn(folio)))
 		return;
 
-	mapping = page_mapping_file(page);
+	mapping = folio_flush_mapping(folio);
 
 	/* Flush this page if there are aliases. */
 	if (mapping && !mapping_mapped(mapping)) {
-		clear_bit(PG_dcache_clean, &page->flags);
+		clear_bit(PG_dcache_clean, &folio->flags);
 	} else {
-		__flush_dcache_page(mapping, page);
+		__flush_dcache_folio(folio);
 		if (mapping) {
-			unsigned long start = (unsigned long)page_address(page);
-			flush_aliases(mapping,  page);
-			flush_icache_range(start, start + PAGE_SIZE);
+			unsigned long start = (unsigned long)folio_address(folio);
+			flush_aliases(mapping, folio);
+			flush_icache_range(start, start + folio_size(folio));
 		}
-		set_bit(PG_dcache_clean, &page->flags);
+		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
+EXPORT_SYMBOL(flush_dcache_folio);
+
+void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 EXPORT_SYMBOL(flush_dcache_page);
 
-void update_mmu_cache(struct vm_area_struct *vma,
-		      unsigned long address, pte_t *ptep)
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long address, pte_t *ptep, unsigned int nr)
 {
 	pte_t pte = *ptep;
 	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	struct folio *folio;
 	struct address_space *mapping;
 
 	reload_tlb_page(vma, address, pte);
@@ -215,19 +222,19 @@ void update_mmu_cache(struct vm_area_struct *vma,
 	/*
 	* The zero page is never written to, so never has any dirty
 	* cache lines, and therefore never needs to be flushed.
 	*/
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (is_zero_pfn(pfn))
 		return;
 
-	mapping = page_mapping_file(page);
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
-		__flush_dcache_page(mapping, page);
+	folio = page_folio(pfn_to_page(pfn));
+	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
+		__flush_dcache_folio(folio);
 
-	if(mapping)
-	{
-		flush_aliases(mapping, page);
+	mapping = folio_flush_mapping(folio);
+	if (mapping) {
+		flush_aliases(mapping, folio);
 		if (vma->vm_flags & VM_EXEC)
-			flush_icache_page(vma, page);
+			flush_icache_pages(vma, &folio->page,
+					folio_nr_pages(folio));
 	}
 }
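
To make the per-folio dirty tracking concrete, here is the deferral pattern from the two functions above in miniature (an illustrative restatement, not new code from the patch). flush_dcache_folio() clears PG_dcache_clean instead of flushing when the folio has no user mappings, and update_mmu_cache_range() performs the deferred flush at most once per folio, because the bit now lives in folio->flags rather than in each constituent page:

	/* In flush_dcache_folio(), while there are no user mappings: */
	clear_bit(PG_dcache_clean, &folio->flags);	/* mark "flush needed" */

	/*
	 * In update_mmu_cache_range(), when the folio is later mapped:
	 * test_and_set_bit() returns the old bit value, so only the first
	 * caller after the clear_bit() actually flushes, and the single
	 * __flush_dcache_folio() covers folio_size() bytes, however many
	 * pages the folio contains.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(folio);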