Unverified Commit 01e57e5f authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!15083 mm/vmalloc: combine all TLB flush operations of KASAN shadow virtual...

!15083  mm/vmalloc: combine all TLB flush operations of KASAN shadow virtual address into one operation

Merge Pull Request from: @ci-robot 
 
PR sync from: Jinjiang Tu <tujinjiang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/RYKS33SE72LN2GO3PED5FYBANDHDHIO3/ 
 
https://gitee.com/src-openeuler/kernel/issues/IBEAMT 
 
Link: https://gitee.com/openeuler/kernel/pulls/15083

 

Reviewed-by: default avatarZhang Peng <zhangpeng362@huawei.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents 786ae243 ed58cbc1
Loading
Loading
Loading
Loading
+9 −3
Original line number Diff line number Diff line
@@ -28,6 +28,9 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>
@@ -385,7 +388,8 @@ void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
			   unsigned long free_region_end,
			   unsigned long flags);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

@@ -400,7 +404,8 @@ static inline int kasan_populate_vmalloc(unsigned long start,
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }
					 unsigned long free_region_end,
					 unsigned long flags) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

@@ -435,7 +440,8 @@ static inline int kasan_populate_vmalloc(unsigned long start,
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }
					 unsigned long free_region_end,
					 unsigned long flags) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
+10 −4
Original line number Diff line number Diff line
@@ -520,7 +520,8 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
			   unsigned long free_region_end,
			   unsigned long flags)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
@@ -553,10 +554,15 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}


		if (flags & KASAN_VMALLOC_PAGE_RANGE)
			apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);

		if (flags & KASAN_VMALLOC_TLB_FLUSH)
			flush_tlb_kernel_range((unsigned long)shadow_start,
					       (unsigned long)shadow_end);
	}
+26 −8
Original line number Diff line number Diff line
@@ -2176,6 +2176,25 @@ decay_va_pool_node(struct vmap_node *vn, bool full_decay)
	reclaim_list_global(&decay_list);
}

static void
kasan_release_vmalloc_node(struct vmap_node *vn)
{
	struct vmap_area *va;
	unsigned long start, end;

	start = list_first_entry(&vn->purge_list, struct vmap_area, list)->va_start;
	end = list_last_entry(&vn->purge_list, struct vmap_area, list)->va_end;

	list_for_each_entry(va, &vn->purge_list, list) {
		if (is_vmalloc_or_module_addr((void *) va->va_start))
			kasan_release_vmalloc(va->va_start, va->va_end,
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE);
	}

	kasan_release_vmalloc(start, end, start, end, KASAN_VMALLOC_TLB_FLUSH);
}

static void purge_vmap_node(struct work_struct *work)
{
	struct vmap_node *vn = container_of(work,
@@ -2183,20 +2202,17 @@ static void purge_vmap_node(struct work_struct *work)
	struct vmap_area *va, *n_va;
	LIST_HEAD(local_list);

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_release_vmalloc_node(vn);

	vn->nr_purged = 0;

	list_for_each_entry_safe(va, n_va, &vn->purge_list, list) {
		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
		unsigned long orig_start = va->va_start;
		unsigned long orig_end = va->va_end;
		unsigned int vn_id = decode_vn_id(va->flags);

		list_del_init(&va->list);

		if (is_vmalloc_or_module_addr((void *)orig_start))
			kasan_release_vmalloc(orig_start, orig_end,
					      va->va_start, va->va_end);

		atomic_long_sub(nr, &vmap_lazy_nr);
		vn->nr_purged++;

@@ -4712,7 +4728,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
		vas[area] = NULL;
	}

@@ -4762,7 +4779,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				&free_vmap_area_list);
		if (va)
			kasan_release_vmalloc(orig_start, orig_end,
				va->va_start, va->va_end);
				va->va_start, va->va_end,
				KASAN_VMALLOC_PAGE_RANGE | KASAN_VMALLOC_TLB_FLUSH);
		vas[area] = NULL;
		kfree(vms[area]);
	}