Commit 056f91a0 authored by zhaolichang's avatar zhaolichang
Browse files

tlbi: fix the problem of incorrect TLB flushing

kunpeng inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBU2Y1



--------------------------------

fix the problem of incorrect TLB flushing

Fixes: ebfca9b4 ("tlbi: Do not force the broadcasting of TLBI and ICache, and add TLB flush helpers based on IPI.")
Signed-off-by: default avatarzhaolichang <zhaolichang@huawei.com>
parent 860874bd
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -2679,10 +2679,8 @@ config ARM64_TLBI_IPI
	depends on ARM64
	default n
	help
	  adds new boot parameter 'disable_tlbflush_is' to disable TLB flush
	  within the same inner shareable domain for performance tuning.

	  When this new parameter is specified, TLB entry is invalidated by
	  Disable TLB flush within the same inner shareable domain for performance
	  tuning. When this new parameter is specified, TLB entry is invalidated by
	  __tlbi(aside1, asid) only on the CPUs specified by mm_cpumask(mm).

	  By using TLB.IS, all CPUs within the same inner shareable domain
+6 −8
Original line number Diff line number Diff line
@@ -249,6 +249,11 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)

static inline void __switch_mm(struct mm_struct *next)
{
#ifdef CONFIG_ARM64_TLBI_IPI
	unsigned int cpu = smp_processor_id();

	cpumask_set_cpu(cpu, mm_cpumask(next));
#endif
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -265,15 +270,8 @@ static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next) {
	if (prev != next)
		__switch_mm(next);
#ifdef CONFIG_ARM64_TLBI_IPI
		if (unlikely(test_tlbi_ipi_switch())) {
			cpumask_clear_cpu(smp_processor_id(), mm_cpumask(prev));
			local_flush_tlb_mm(prev);
		}
#endif
	}

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+0 −4
Original line number Diff line number Diff line
@@ -946,11 +946,7 @@ static inline int __ptep_clear_flush_young(struct vm_area_struct *vma,
		 * context-switch, which provides a DSB to complete the TLB
		 * invalidation.
		 */
#ifdef CONFIG_ARM64_TLBI_IPI
		flush_tlb_page_nosync_ipi(vma, address);
#else
		flush_tlb_page_nosync(vma, address);
#endif
	}

	return young;
+69 −95
Original line number Diff line number Diff line
@@ -16,8 +16,6 @@
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
#include <linux/smp.h>
#include <linux/ctype.h>

/*
 * Raw TLBI operations.
@@ -253,125 +251,91 @@ static inline void flush_tlb_all(void)
}

#ifdef CONFIG_ARM64_TLBI_IPI
static unsigned int disable_tlbflush_is;

#define FLAG_TLBFLUSH_PAGE      0x0002
#define FLAG_TLBFLUSH_SWITCH    0x0004
#define FLAG_TLBFLUSH_MM        0x0008

#define TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)                   \
bool test_tlbi_ipi_##flag(void)                                 \
{                                                               \
    return !!(disable_tlbflush_is & FLAG_TLBFLUSH_##FLAG);  \
}

#define TEST_TLBFLUSH_FLAG(flag, FLAG)                          \
static __always_inline TEST_TLBFLUSH_FLAG_EXTERN(flag, FLAG)

TEST_TLBFLUSH_FLAG(mm, MM)
TEST_TLBFLUSH_FLAG(page, PAGE)
TEST_TLBFLUSH_FLAG(switch, SWITCH)

static inline void local_flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_mm_nosync(struct mm_struct *mm)
{
    unsigned long asid = __TLBI_VADDR(0, ASID(mm));
	unsigned long asid;

    dsb(nshst);
    __tlbi(aside1, asid);
    __tlbi_user(aside1, asid);
    dsb(nsh);
	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void __flush_tlb_mm(struct mm_struct *mm)
#else
static inline void flush_tlb_mm(struct mm_struct *mm)
{
    unsigned long asid = __TLBI_VADDR(0, ASID(mm));
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void ipi_flush_tlb_mm(void *arg)
{
    struct mm_struct *mm = arg;

    local_flush_tlb_mm(mm);
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
    if (unlikely(test_tlbi_ipi_mm()))
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm,
				(void *)mm, true);
    else
		__flush_tlb_mm(mm);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
#endif

static inline void __flush_tlb_page_nosync_ipi(unsigned long addr)
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}

static inline void __local_flush_tlb_page_nosync(unsigned long addr)
#ifdef CONFIG_ARM64_TLBI_IPI
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(nshst);
    __tlbi(vale1, addr);
    __tlbi_user(vale1, addr);
	__tlbi(aside1, asid);
	dsb(nsh);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}

static inline void ipi_flush_tlb_page_nosync(void *arg)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
    unsigned long addr = *(unsigned long *)arg;

    __local_flush_tlb_page_nosync(addr);
	if (unlikely(cpumask_full(mm_cpumask(mm)))) {
		flush_tlb_mm_nosync(mm);
		dsb(ish);
	} else {
		on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm, mm, 1);
	}
}

static inline void flush_tlb_page_nosync_ipi(struct vm_area_struct *vma, unsigned long uaddr)
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
	unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

    if (unlikely(test_tlbi_ipi_page()))
		on_each_cpu_mask(mm_cpumask(vma->vm_mm),
				ipi_flush_tlb_page_nosync, &addr, true);
    else
		__flush_tlb_page_nosync_ipi(addr);
	dsb(nshst);
	__tlbi(vale1, addr);
	__tlbi_user(vale1, addr);
	dsb(nsh);
	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}

#else /* CONFIG_ARM64_TLBI_IPI */

static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void ipi_flush_tlb_page(void *arg)
{
	unsigned long asid;
	struct tlb_args *ta = arg;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}
#endif /* CONFIG_ARM64_TLBI_IPI */

static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
@@ -382,11 +346,21 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
#ifdef CONFIG_ARM64_TLBI_IPI
	flush_tlb_page_nosync_ipi(vma, uaddr);
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	if (unlikely(cpumask_full(mm_cpumask(vma->vm_mm)))) {
		flush_tlb_page_nosync(vma, uaddr);
		dsb(ish);
	} else {
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
	}
#else
	flush_tlb_page_nosync(vma, uaddr);
#endif
	dsb(ish);
#endif
}

static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+0 −1
Original line number Diff line number Diff line
@@ -36,7 +36,6 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
			   syscall.o proton-pack.o idreg-override.o idle.o	\
			   patching.o

obj-$(CONFIG_ARM64_TLBI_IPI)		+= tlbflush.o
obj-$(CONFIG_IEE)				+= haoc/
obj-$(CONFIG_AARCH32_EL0)		+= binfmt_elf32.o sys32.o signal32.o			\
					   sys_compat.o
Loading