Commit 203b7b6a authored by Matthew Wilcox (Oracle), committed by Andrew Morton
Browse files

mm: rationalise flush_icache_pages() and flush_icache_page()

Move the default (no-op) implementation of flush_icache_pages() from
<asm-generic/cacheflush.h> to <linux/cacheflush.h>.  Hoist the
flush_icache_page() wrapper out of each architecture and into
<linux/cacheflush.h>.

Link: https://lkml.kernel.org/r/20230802151406.3735276-32-willy@infradead.org


Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 29269ad9
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -53,10 +53,6 @@ extern void flush_icache_user_page(struct vm_area_struct *vma,
#define flush_icache_user_page flush_icache_user_page
#endif /* CONFIG_SMP */

/* This is used only in __do_fault and do_swap_page.  */
#define flush_icache_page(vma, page) \
	flush_icache_user_page((vma), (page), 0, 0)

/*
 * Both implementations of flush_icache_user_page flush the entire
 * address space, so one call, no matter how many pages.
@@ -66,6 +62,7 @@ static inline void flush_icache_pages(struct vm_area_struct *vma,
{
	flush_icache_user_page(vma, page, 0, 0);
}
#define flush_icache_pages flush_icache_pages

#include <asm-generic/cacheflush.h>

+0 −9
Original line number Diff line number Diff line
@@ -18,15 +18,6 @@
#include <linux/mm.h>
#include <asm/shmparam.h>

/*
 * Semantically we need this because icache doesn't snoop dcache/dma.
 * However ARC Cache flush requires paddr as well as vaddr, latter not available
 * in the flush_icache_page() API. So we no-op it but do the equivalent work
 * in update_mmu_cache()
 */
#define flush_icache_page(vma, page)
#define flush_icache_pages(vma, page, nr)

void flush_cache_all(void);

void flush_icache_range(unsigned long kstart, unsigned long kend);
+0 −7
Original line number Diff line number Diff line
@@ -321,13 +321,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
#define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
#define flush_icache_pages(vma, page, nr)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
+0 −1
Original line number Diff line number Diff line
@@ -45,7 +45,6 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
#define flush_cache_vmap(start, end)		cache_wbinv_all()
#define flush_cache_vunmap(start, end)		cache_wbinv_all()

#define flush_icache_page(vma, page)		do {} while (0);
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
#define flush_icache_deferred(mm)		do {} while (0);
+0 −1
Original line number Diff line number Diff line
@@ -33,7 +33,6 @@ static inline void flush_dcache_page(struct page *page)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)		do { } while (0)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

Loading