Unverified Commit c3b2d670 authored by Nanyong Sun, committed by Palmer Dabbelt

riscv: mm: add param stride for __sbi_tlb_flush_range



Add a stride parameter to __sbi_tlb_flush_range() that represents the
page stride between the start and end addresses. Normally the stride is
PAGE_SIZE, but when flushing a huge page address the stride can be the
huge page size, such as PMD_SIZE; then only one TLB entry needs to be
flushed if the address range fits within PMD_SIZE.
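
As an illustration of the intended use, a huge-page flush helper could
pass the mapping size as the stride. This is a sketch only: the
flush_pmd_tlb_range() helper and the CONFIG_TRANSPARENT_HUGEPAGE guard
are assumptions for illustration and are not part of this patch.

/*
 * Sketch only (not part of this patch): a PMD-granular flush helper.
 * Passing PMD_SIZE as the stride means a single 2MB huge page gives
 * size <= stride, so one per-address flush is issued instead of a
 * full TLB flush.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start,
			      PMD_SIZE);
}
#endif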

Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
parent 141682f5
+5 −5
@@ -15,7 +15,7 @@ void flush_tlb_all(void)
 * Kernel may panic if cmask is NULL.
 */
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
-				  unsigned long size)
+				  unsigned long size, unsigned long stride)
{
	struct cpumask hmask;
	unsigned int cpuid;
@@ -27,7 +27,7 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
-		if (size <= PAGE_SIZE)
+		if (size <= stride)
			local_flush_tlb_page(start);
		else
			local_flush_tlb_all();
@@ -41,16 +41,16 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,

void flush_tlb_mm(struct mm_struct *mm)
{
-	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
-	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE);
}
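
A note on the updated call sites: all three pass PAGE_SIZE as the
stride, so existing behavior is preserved. On the local-CPU path shown
above, flush_tlb_page() satisfies size <= stride and flushes a single
entry, while flush_tlb_mm() passes a size of -1 (the whole address
space) and still falls back to local_flush_tlb_all(). Only a caller
passing a larger stride, such as the hypothetical PMD_SIZE helper
sketched in the commit message, takes the new single-entry path for a
huge page.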