Unverified commit 40ccfff9, authored by openeuler-ci-bot, committed by Gitee

!13981 LoongArch: backport ptw&set_pte patches from upstream

Merge Pull Request from: @ci-robot 
 
PR sync from: Hongchen Zhang <zhanghongchen@loongson.cn>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3P7WGX4J7XWPQMQ2RRLFLHDOIXEJC3HJ/ 
Bibo Mao (1):
  LoongArch: Set initial pte entry with PAGE_GLOBAL for kernel space

Huacai Chen (3):
  LoongArch: Remove superfluous flush_dcache_page() definition
  LoongArch: Use accessors to page table entries instead of direct
    dereference
  LoongArch: Improve hardware page table walker


-- 
2.33.0
 
https://gitee.com/openeuler/kernel/issues/IB7Y4K 
 
Link: https://gitee.com/openeuler/kernel/pulls/13981

 

Reviewed-by: Juxin Gao <gaojuxin@loongson.cn>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 15bd6d7f 031bfb5b
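For reference, the "Use accessors to page table entries instead of direct dereference" patch in this series converts reads of page table entries from plain pointer dereferences (*ptep) to the ptep_get() helper, whose generic fallback in include/linux/pgtable.h is a READ_ONCE() of the entry, so the compiler can neither tear nor repeat the load while the entry may be modified concurrently (e.g. by the hardware page table walker). A minimal sketch of the call-site pattern, for illustration only; it is not a hunk from this pull request and the function name is made up:

#include <linux/pgtable.h>

static inline bool pte_is_present_sketch(pte_t *ptep)
{
	/* Read the entry exactly once through the accessor ... */
	pte_t pte = ptep_get(ptep);	/* was: pte_t pte = *ptep; */

	/* ... then test the snapshot, not the live entry. */
	return pte_present(pte);
}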
+2 −0
@@ -15,6 +15,7 @@
 #define __LL		"ll.w	"
 #define __SC		"sc.w	"
 #define __AMADD		"amadd.w	"
+#define __AMOR		"amor.w		"
 #define __AMAND_DB	"amand_db.w	"
 #define __AMOR_DB	"amor_db.w	"
 #define __AMXOR_DB	"amxor_db.w	"
@@ -22,6 +23,7 @@
 #define __LL		"ll.d	"
 #define __SC		"sc.d	"
 #define __AMADD		"amadd.d	"
+#define __AMOR		"amor.d		"
 #define __AMAND_DB	"amand_db.d	"
 #define __AMOR_DB	"amor_db.d	"
 #define __AMXOR_DB	"amxor_db.d	"
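The two __AMOR lines added above provide the plain (non-barrier) atomic-OR mnemonic alongside the existing __AMADD and __AMOR_DB macros. As a rough sketch of how such a macro can be used (not a hunk from this series; the helper name is made up), an inline-asm amor instruction ORs a flag into a word as a single atomic memory operation, avoiding a racy read-modify-write on SMP, which is the kind of update needed when _PAGE_GLOBAL is propagated to a buddy pte while the hardware page table walker may access the table concurrently:

static inline void pte_set_bits_atomic_sketch(unsigned long *entry, unsigned long bits)
{
	__asm__ __volatile__(
	__AMOR "$zero, %[bits], %[mem]	\n"	/* *entry |= bits; old value discarded in $zero */
	: [mem] "+ZB" (*entry)
	: [bits] "r" (bits)
	: "memory");
}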
+0 −3
@@ -37,8 +37,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_range	local_flush_icache_range
 #define flush_icache_user_range	local_flush_icache_range
 
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-
 #define flush_cache_all()				do { } while (0)
 #define flush_cache_mm(mm)				do { } while (0)
 #define flush_cache_dup_mm(mm)				do { } while (0)
@@ -47,7 +45,6 @@ void local_flush_icache_range(unsigned long start, unsigned long end);
 #define flush_cache_vmap(start, end)			do { } while (0)
 #define flush_cache_vunmap(start, end)			do { } while (0)
 #define flush_icache_user_page(vma, page, addr, len)	do { } while (0)
-#define flush_dcache_page(page)				do { } while (0)
 #define flush_dcache_mmap_lock(mapping)			do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)		do { } while (0)
 
+2 −2
@@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
 {
 	pte_t clear;
-	pte_t pte = *ptep;
+	pte_t pte = ptep_get(ptep);
 
 	pte_val(clear) = (unsigned long)invalid_pte_table;
 	set_pte_at(mm, addr, ptep, clear);
@@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     pte_t *ptep, pte_t pte,
 					     int dirty)
 {
-	int changed = !pte_same(*ptep, pte);
+	int changed = !pte_same(ptep_get(ptep), pte);
 
 	if (changed) {
 		set_pte_at(vma->vm_mm, addr, ptep, pte);
+3 −3
@@ -43,13 +43,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *pte = virt_to_kpte(addr);
 
-	if (WARN_ON(!pte) || pte_none(*pte))
+	if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
 		return false;
 
 	if (protect)
-		set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+		set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
 	else
-		set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+		set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
 
 	preempt_disable();
 	local_flush_tlb_one(addr);
+27 −8
@@ -49,12 +49,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 
 /* Normal, classic get_new_mmu_context */
 static inline void
-get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, bool *need_flush)
 {
 	u64 asid = asid_cache(cpu);
 
 	if (!((++asid) & cpu_asid_mask(&cpu_data[cpu])))
-		local_flush_tlb_user();	/* start new asid cycle */
+		*need_flush = true;	/* start new asid cycle */
 
 	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
 }
@@ -74,21 +74,34 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 	return 0;
 }
 
+static inline void atomic_update_pgd_asid(unsigned long asid, unsigned long pgdl)
+{
+	__asm__ __volatile__(
+	"csrwr %[pgdl_val], %[pgdl_reg] \n\t"
+	"csrwr %[asid_val], %[asid_reg] \n\t"
+	: [asid_val] "+r" (asid), [pgdl_val] "+r" (pgdl)
+	: [asid_reg] "i" (LOONGARCH_CSR_ASID), [pgdl_reg] "i" (LOONGARCH_CSR_PGDL)
+	: "memory"
+	);
+}
+
 static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 				      struct task_struct *tsk)
 {
+	bool need_flush = false;
 	unsigned int cpu = smp_processor_id();
 
 	/* Check if our ASID is of an older version and thus invalid */
 	if (!asid_valid(next, cpu))
-		get_new_mmu_context(next, cpu);
-
-	write_csr_asid(cpu_asid(cpu, next));
+		get_new_mmu_context(next, cpu, &need_flush);
 
 	if (next != &init_mm)
-		csr_write64((unsigned long)next->pgd, LOONGARCH_CSR_PGDL);
+		atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);
 	else
-		csr_write64((unsigned long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+		atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)invalid_pg_dir);
 
+	if (need_flush)
+		local_flush_tlb_user(); /* Flush tlb after update ASID */
+
 	/*
 	 * Mark current->active_mm as not "active" anymore.
@@ -135,9 +148,15 @@ drop_mmu_context(struct mm_struct *mm, unsigned int cpu)
 	asid = read_csr_asid() & cpu_asid_mask(&current_cpu_data);
 
 	if (asid == cpu_asid(cpu, mm)) {
+		bool need_flush = false;
+
 		if (!current->mm || (current->mm == mm)) {
-			get_new_mmu_context(mm, cpu);
+			get_new_mmu_context(mm, cpu, &need_flush);
+
 			write_csr_asid(cpu_asid(cpu, mm));
+			if (need_flush)
+				local_flush_tlb_user(); /* Flush tlb after update ASID */
+
 			goto out;
 		}
 	}
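Taken together, the mmu_context.h hunks change the activation order: the ASID and PGD are now written with consecutive csrwr instructions (atomic_update_pgd_asid()), and the TLB flush triggered by an ASID roll-over is deferred until after the new ASID is live, per the "Flush tlb after update ASID" comments; the apparent intent is to leave no window in which the old ASID is paired with the new page table. A distilled sketch of the resulting flow in switch_mm_irqs_off(), condensed for illustration rather than a verbatim copy of the hunks above:

	bool need_flush = false;
	unsigned int cpu = smp_processor_id();

	if (!asid_valid(next, cpu))
		get_new_mmu_context(next, cpu, &need_flush);	/* may start a new ASID cycle */

	/* Install the new PGD and ASID back-to-back. */
	atomic_update_pgd_asid(cpu_asid(cpu, next), (unsigned long)next->pgd);

	if (need_flush)
		local_flush_tlb_user();	/* flush only once the new ASID is in place */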