Commit 8b5989f3 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

arm: implement the new page table range API

Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Change the PG_dcache_clean flag from being per-page
to per-folio which makes __dma_page_dev_to_cpu() a bit more exciting. 
Also add flush_cache_pages(), even though this isn't used by generic code
(yet?)
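
As a hedged illustration of the new calls working together (this sketch is not part of the commit; the helper name map_folio_pages() and its parameters are hypothetical), a caller can establish every PTE for a folio at once instead of looping over set_pte_at():

	/* Hypothetical caller: map all pages of a folio in one call. */
	static void map_folio_pages(struct vm_area_struct *vma, unsigned long addr,
			struct folio *folio, pte_t *ptep, pgprot_t prot)
	{
		unsigned int nr = folio_nr_pages(folio);
		struct page *page = folio_page(folio, 0);
		pte_t pte = mk_pte(page, prot);

		flush_icache_pages(vma, page, nr);	/* no-op on arm */
		set_ptes(vma->vm_mm, addr, ptep, pte, nr);
		/* NULL vmf: no fault context; nr covers the whole folio */
		update_mmu_cache_range(NULL, vma, addr, ptep, nr);
	}

set_ptes() writes nr consecutive PTEs, advancing the target pfn by one page per entry, so one call plus one MMU-cache update replaces nr trips through the old single-page API.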

[m.szyprowski@samsung.com: fix potential endless loop in __dma_page_dev_to_cpu()]
  Link: https://lkml.kernel.org/r/20230809172737.3574190-1-m.szyprowski@samsung.com
[willy@infradead.org: fix folio conversion in __dma_page_dev_to_cpu()]
  Link: https://lkml.kernel.org/r/20230823191852.1556561-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20230802151406.3735276-10-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent ac4cfacc
arch/arm/include/asm/cacheflush.h (+15 −9)
@@ -231,14 +231,15 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 					vma->vm_flags);
 }
 
-static inline void
-vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+static inline void vivt_flush_cache_pages(struct vm_area_struct *vma,
+		unsigned long user_addr, unsigned long pfn, unsigned int nr)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
 	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
 		unsigned long addr = user_addr & PAGE_MASK;
-		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+		__cpuc_flush_user_range(addr, addr + nr * PAGE_SIZE,
+				vma->vm_flags);
 	}
 }

@@ -247,15 +248,17 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
 		vivt_flush_cache_mm(mm)
 #define flush_cache_range(vma,start,end) \
 		vivt_flush_cache_range(vma,start,end)
-#define flush_cache_page(vma,addr,pfn) \
-		vivt_flush_cache_page(vma,addr,pfn)
+#define flush_cache_pages(vma, addr, pfn, nr) \
+		vivt_flush_cache_pages(vma, addr, pfn, nr)
 #else
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr,
+		unsigned long pfn, unsigned int nr);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+#define flush_cache_page(vma, addr, pfn) flush_cache_pages(vma, addr, pfn, 1)
 
 /*
  * flush_icache_user_range is used when we want to ensure that the
@@ -289,7 +292,9 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  * See update_mmu_cache for the user space part.
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *);
+void flush_dcache_page(struct page *);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
 
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 static inline void flush_kernel_vmap_range(void *addr, int size)
@@ -321,6 +326,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
  * duplicate cache flushing elsewhere performed by flush_dcache_page().
  */
 #define flush_icache_page(vma,page)	do { } while (0)
+#define flush_icache_pages(vma, page, nr)	do { } while (0)
 
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
arch/arm/include/asm/pgtable.h (+3 −2)
@@ -207,8 +207,9 @@ static inline void __sync_icache_dcache(pte_t pteval)
 extern void __sync_icache_dcache(pte_t pteval);
 #endif
 
-void set_pte_at(struct mm_struct *mm, unsigned long addr,
-		      pte_t *ptep, pte_t pteval);
+void set_ptes(struct mm_struct *mm, unsigned long addr,
+		      pte_t *ptep, pte_t pteval, unsigned int nr);
+#define set_ptes set_ptes
 
 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
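
The matching set_ptes() definition lives in this commit's arch/arm/mm/mmu.c change, which is not shown in this excerpt. Eliding the icache/dcache sync and the PTE_EXT_NG handling, the body takes the usual set_ptes() shape, sketched here:

	/* Sketch: write nr PTEs, bumping the pfn one page at a time. */
	for (;;) {
		set_pte_ext(ptep, pteval, ext);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pteval) += PAGE_SIZE;
	}

The #define set_ptes set_ptes line signals to include/linux/pgtable.h that the architecture provides its own implementation, so the generic fallback loop is not compiled in; set_pte_at() itself becomes the nr == 1 case in generic code.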
arch/arm/include/asm/tlbflush.h (+9 −5)
@@ -619,18 +619,22 @@ extern void flush_bp_all(void);
  * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
  * back to the page. On ARMv6 and later, the cache coherency is handled via
- * the set_pte_at() function.
+ * the set_ptes() function.
  */
 #if __LINUX_ARM_ARCH__ < 6
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-	pte_t *ptep);
+void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep, unsigned int nr);
 #else
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+		struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+		unsigned int nr)
 {
 }
 #endif
 
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif
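
For context on the new vmf and nr parameters: the generic fault path added alongside this series batches a folio's worth of PTEs and notifies the architecture once. Paraphrased from mm/memory.c's set_pte_range(), with details elided (not part of this diff):

	/* Generic fault path, paraphrased: one notification per batch. */
	flush_icache_pages(vma, page, nr);
	entry = mk_pte(page, vma->vm_page_prot);
	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);

Architectures that care can use the vm_fault to distinguish fault-time updates from other PTE changes; arm ignores it, and the update_mmu_cache() compatibility macro above passes NULL with nr == 1.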
arch/arm/mm/copypage-v4mc.c (+3 −2)
@@ -64,10 +64,11 @@ static void mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
+	struct folio *src = page_folio(from);
 	void *kto = kmap_atomic(to);
 
-	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping_file(from), from);
+	if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+		__flush_dcache_folio(folio_flush_mapping(src), src);
 
 	raw_spin_lock(&minicache_lock);

arch/arm/mm/copypage-v6.c (+3 −2)
@@ -69,11 +69,12 @@ static void discard_old_kernel_data(void *kto)
 static void v6_copy_user_highpage_aliasing(struct page *to,
 	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
+	struct folio *src = page_folio(from);
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
 
-	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
-		__flush_dcache_page(page_mapping_file(from), from);
+	if (!test_and_set_bit(PG_dcache_clean, &src->flags))
+		__flush_dcache_folio(folio_flush_mapping(src), src);
 
 	/* FIXME: not highmem safe */
 	discard_old_kernel_data(page_address(to));
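
Both copypage conversions embody the per-folio PG_dcache_clean semantics described in the commit message: the bit is tested and set on the folio, so the first copy from any page of a large folio performs a single __flush_dcache_folio() over the whole folio, and later copies find the bit already set. As a hedged template of the recurring pattern (variable names generic):

	struct folio *folio = page_folio(page);

	/* One flag per folio: the first user flushes the whole folio. */
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(folio_flush_mapping(folio), folio);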