Commit 1e9fdf21 authored by Peter Zijlstra, committed by Linus Torvalds
Browse files

mmu_gather: Remove per arch tlb_{start,end}_vma()



Scattered across the archs are 3 basic forms of tlb_{start,end}_vma().
Provide two new MMU_GATHER knobs to enumerate them and remove the per
arch tlb_{start,end}_vma() implementations.

 - MMU_GATHER_NO_FLUSH_CACHE indicates the arch has flush_cache_range()
   but does *NOT* want to call it for each VMA.

 - MMU_GATHER_MERGE_VMAS indicates the arch wants to merge the
   invalidate across multiple VMAs if possible.

With these it is possible to capture the three forms:

  1) empty stubs;
     select MMU_GATHER_NO_FLUSH_CACHE and MMU_GATHER_MERGE_VMAS

  2) start: flush_cache_range(), end: empty;
     select MMU_GATHER_MERGE_VMAS

  3) start: flush_cache_range(), end: flush_tlb_range();
     default

Obviously, if the architecture does not have flush_cache_range() then
it also doesn't need to select MMU_GATHER_NO_FLUSH_CACHE.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 23a67619
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -438,6 +438,13 @@ config MMU_GATHER_PAGE_SIZE

config MMU_GATHER_NO_RANGE
	bool
	select MMU_GATHER_MERGE_VMAS

config MMU_GATHER_NO_FLUSH_CACHE
	bool

config MMU_GATHER_MERGE_VMAS
	bool

config MMU_GATHER_NO_GATHER
	bool
+0 −13
Original line number Diff line number Diff line
@@ -4,19 +4,6 @@
#define __ASM_CSKY_TLB_H

#include <asm/cacheflush.h>

#define tlb_start_vma(tlb, vma) \
	do { \
		if (!(tlb)->fullmm) \
			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
	}  while (0)

#define tlb_end_vma(tlb, vma) \
	do { \
		if (!(tlb)->fullmm) \
			flush_tlb_range(vma, (vma)->vm_start, (vma)->vm_end); \
	}  while (0)

#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>
+1 −0
Original line number Diff line number Diff line
@@ -108,6 +108,7 @@ config LOONGARCH
	select TRACE_IRQFLAGS_SUPPORT
	select USE_PERCPU_NUMA_NODE_ID
	select ZONE_DMA32
	select MMU_GATHER_MERGE_VMAS if MMU

config 32BIT
	bool
+0 −10
Original line number Diff line number Diff line
@@ -137,16 +137,6 @@ static inline void invtlb_all(u32 op, u32 info, u64 addr)
		);
}

/*
 * LoongArch doesn't need any special per-pte or per-vma handling, except
 * we need to flush cache for area to be unmapped.
 */
#define tlb_start_vma(tlb, vma)					\
	do {							\
		if (!(tlb)->fullmm)				\
			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
	}  while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)

static void tlb_flush(struct mmu_gather *tlb);
+1 −0
Original line number Diff line number Diff line
@@ -256,6 +256,7 @@ config PPC
	select IRQ_FORCED_THREADING
	select MMU_GATHER_PAGE_SIZE
	select MMU_GATHER_RCU_TABLE_FREE
	select MMU_GATHER_MERGE_VMAS
	select MODULES_USE_ELF_RELA
	select NEED_DMA_MAP_STATE		if PPC64 || NOT_COHERENT_CACHE
	select NEED_PER_CPU_EMBED_FIRST_CHUNK	if PPC64
Loading