Unverified Commit 510b4e28 authored by openeuler-ci-bot, committed by Gitee

!13974 arm64: optimize tlb flush

Merge Pull Request from: @ci-robot 
 
PR sync from: Kefeng Wang <wangkefeng.wang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/YBLJB54DVXA6G6Z5BLK5OKH4LPRYFIQ2/ 
Gavin Shan (1):
  arm64: tlb: Allow range operation for MAX_TLBI_RANGE_PAGES

Kefeng Wang (2):
  arm64: tlbflush: add __flush_tlb_range_limit_excess()
  arm64: optimize flush tlb kernel range

Oliver Upton (1):
  arm64: tlbflush: Rename MAX_TLBI_OPS


-- 
2.27.0
 
https://gitee.com/openeuler/kernel/issues/IB82FR 
 
Link: https://gitee.com/openeuler/kernel/pulls/13974

 

Reviewed-by: Zhang Peng <zhangpeng362@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 151d80c0 67561efe
arch/arm64/include/asm/tlbflush.h: +27 −17
@@ -339,7 +339,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
-#define MAX_TLBI_OPS	PTRS_PER_PTE
+#define MAX_DVM_OPS	PTRS_PER_PTE

/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
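
For scale, the cap renamed in the hunk above is one page table's worth of entries. A minimal user-space sketch (not kernel code, and assuming the common 4K translation granule where PTRS_PER_PTE is 512) shows the point at which the loop-based path gives up and falls back to a full flush:

	/* Stand-alone illustration: with 4K pages, MAX_DVM_OPS * PAGE_SIZE
	 * is a 2 MiB window; flushes at or beyond it take the fallback. */
	#include <stdio.h>

	#define PAGE_SIZE    4096UL
	#define PTRS_PER_PTE 512UL          /* 4K-granule assumption */
	#define MAX_DVM_OPS  PTRS_PER_PTE   /* the renamed cap from the hunk above */

	int main(void)
	{
		unsigned long cutoff = MAX_DVM_OPS * PAGE_SIZE;

		printf("loop-based cutoff: %lu bytes (%lu MiB)\n",
		       cutoff, cutoff >> 20);   /* 2097152 bytes (2 MiB) */
		return 0;
	}
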
@@ -402,6 +402,23 @@ do { \
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)

+static inline bool __flush_tlb_range_limit_excess(unsigned long start,
+		unsigned long end, unsigned long pages, unsigned long stride)
+{
+	/*
+	 * When the system does not support TLB range based flush
+	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
+	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
+	 * pages can be handled.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_DVM_OPS * stride)) ||
+	    pages > MAX_TLBI_RANGE_PAGES)
+		return true;
+
+	return false;
+}
+
static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
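
A note on the helper just added: it centralizes the fallback decision for both the user and kernel range paths. Below is a self-contained sketch of the same decision logic; limit_excess and tlb_range are local stand-ins (not kernel symbols) for __flush_tlb_range_limit_excess() and system_supports_tlb_range(), and MAX_TLBI_RANGE_PAGES works out to 2^21 pages per the arm64 headers. For a 4 MiB flush, a system without range TLBI falls back, while one with range TLBI does not:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SHIFT           12
	#define PAGE_SIZE            (1UL << PAGE_SHIFT)
	#define MAX_DVM_OPS          512UL        /* PTRS_PER_PTE, 4K granule */
	#define MAX_TLBI_RANGE_PAGES (1UL << 21)

	static bool tlb_range;   /* stand-in for system_supports_tlb_range() */

	static bool limit_excess(unsigned long start, unsigned long end,
				 unsigned long pages, unsigned long stride)
	{
		if ((!tlb_range && (end - start) >= (MAX_DVM_OPS * stride)) ||
		    pages > MAX_TLBI_RANGE_PAGES)
			return true;

		return false;
	}

	int main(void)
	{
		unsigned long start = 0, end = 4UL << 20;   /* 4 MiB range */
		unsigned long pages = (end - start) >> PAGE_SHIFT;

		tlb_range = false;
		printf("no range ops:   fallback=%d\n",
		       limit_excess(start, end, pages, PAGE_SIZE));   /* 1 */

		tlb_range = true;
		printf("with range ops: fallback=%d\n",
		       limit_excess(start, end, pages, PAGE_SIZE));   /* 0 */
		return 0;
	}
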
@@ -413,15 +430,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

-	/*
-	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_TLBI_OPS - 1) pages;
-	 * When uses TLB range ops, we can handle up to
-	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
-	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
-	    pages >= MAX_TLBI_RANGE_PAGES) {
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}
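
The boundary condition also changes here, courtesy of the series' "arm64: tlb: Allow range operation for MAX_TLBI_RANGE_PAGES" patch: the removed open-coded check used pages >= MAX_TLBI_RANGE_PAGES, while the new helper uses pages >, so a range of exactly MAX_TLBI_RANGE_PAGES pages now stays on the range-based path instead of falling back to flush_tlb_mm(). A small sketch of that boundary (the MAX_TLBI_RANGE_PAGES value is taken from the arm64 headers):

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_TLBI_RANGE_PAGES (1UL << 21)

	int main(void)
	{
		unsigned long pages = MAX_TLBI_RANGE_PAGES;   /* exactly at the limit */

		bool old_fallback = pages >= MAX_TLBI_RANGE_PAGES;   /* true  */
		bool new_fallback = pages >  MAX_TLBI_RANGE_PAGES;   /* false */

		printf("old check: %s\n", old_fallback ? "flush_tlb_mm" : "range TLBI");
		printf("new check: %s\n", new_fallback ? "flush_tlb_mm" : "range TLBI");
		return 0;
	}
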
@@ -460,19 +469,20 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
-	unsigned long addr;
+	const unsigned long stride = PAGE_SIZE;
+	unsigned long pages;

-	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
+	start = round_down(start, stride);
+	end = round_up(end, stride);
+	pages = (end - start) >> PAGE_SHIFT;
+
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

-	start = __TLBI_VADDR(start, 0);
-	end = __TLBI_VADDR(end, 0);
-
	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		__tlbi(vaale1is, addr);
+	__flush_tlb_range_op(vaale1is, start, pages, stride, 0, 0, false);
	dsb(ish);
	isb();
}
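
With this last hunk, flush_tlb_kernel_range() mirrors the user-range path: it rounds the bounds to the page stride, counts pages, and hands the work to __flush_tlb_range_op(), which can emit range TLBI encodings where the hardware supports them instead of one instruction per page. A sketch of the new bounds arithmetic, assuming 4K pages and using made-up sample addresses:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1UL << PAGE_SHIFT)

	/* same rounding the kernel helpers perform, for power-of-two alignments */
	#define round_down(x, a) ((x) & ~((a) - 1))
	#define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		const unsigned long stride = PAGE_SIZE;
		unsigned long start = 0xffff800000001234UL;   /* hypothetical */
		unsigned long end   = 0xffff800000007654UL;   /* hypothetical */

		start = round_down(start, stride);
		end = round_up(end, stride);

		/* 0x...1000 .. 0x...8000 -> 7 pages to invalidate */
		printf("pages = %lu\n", (end - start) >> PAGE_SHIFT);
		return 0;
	}
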