Unverified Commit 34c26b73 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!13212 v7 arm64: support page mapping percpu first chunk allocator

Merge Pull Request from: @ci-robot 
 
PR sync from: Kaixiong Yu <yukaixiong@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/B7WXI2QFFZSHUPNXUGWLULLKVB35NBWQ/ 
Percpu embedded first chunk allocator is the first-choice option, but it
can fail on ARM64 when NUMA is enabled together with CONFIG_KASAN=y.

Let's implement page mapping percpu first chunk allocator as a fallback
to the embedding allocator to increase the robustness of the system.

Also fix a crash when both NEED_PER_CPU_PAGE_FIRST_CHUNK and KASAN_VMALLOC are enabled.

After merging this patch set, the ARM64 machine can start and work normally.

Kefeng Wang (3):
  vmalloc: choose a better start address in vm_area_register_early()
  arm64: support page mapping percpu first chunk allocator
  kasan: arm64: fix pcpu_page_first_chunk crash with KASAN_VMALLOC


-- 
2.34.1
 
https://gitee.com/src-openeuler/kernel/issues/IB2BDP 
 
Link: https://gitee.com/openeuler/kernel/pulls/13212

 

Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarYang Yingliang <yangyingliang@huawei.com>
parents 53677a49 d26bfd1d
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -1196,6 +1196,10 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
	def_bool y
	depends on NUMA

# Selected on NUMA so the page-mapping percpu first chunk allocator is
# available as a fallback when the embedding allocator fails (e.g. with
# KASAN enabled). Indented with tabs to match the sibling
# NEED_PER_CPU_EMBED_FIRST_CHUNK entry above.
config NEED_PER_CPU_PAGE_FIRST_CHUNK
	def_bool y
	depends on NUMA

source "kernel/Kconfig.hz"

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+16 −0
Original line number Diff line number Diff line
@@ -208,6 +208,22 @@ static void __init clear_pgds(unsigned long start,
		set_pgd(pgd_offset_k(start), __pgd(0));
}

#ifdef CONFIG_KASAN_VMALLOC
/*
 * Populate the KASAN shadow for an early-registered vm area (e.g. the
 * percpu first chunk set up via vm_area_register_early()), before the
 * normal vmalloc shadow hooks are active.
 *
 * @start: start address of the vm area
 * @size:  size of the vm area in bytes
 *
 * Non-vmalloc/module addresses are skipped: their shadow is covered by
 * the regular kasan_init() mappings. The shadow range is expanded to
 * page granularity (ALIGN_DOWN on the start, ALIGN on the end) so
 * kasan_map_populate() maps whole shadow pages; NUMA_NO_NODE is used
 * because no specific node affinity is required here.
 */
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
+73 −11
Original line number Diff line number Diff line
@@ -342,12 +342,61 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
	memblock_free_early(__pa(ptr), size);
}

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
#include <asm/pgalloc.h>

/*
 * Ensure a full page-table chain (p4d -> pud -> pmd -> pte page) exists
 * for @addr in the kernel page tables, allocating any missing levels
 * from memblock. Used by pcpu_page_first_chunk() to prepare the vmalloc
 * addresses that the page-mapped percpu first chunk will occupy.
 *
 * Runs at early boot (__init), before the buddy allocator is up, hence
 * memblock_alloc() rather than the normal page-table allocators.
 */
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		pud_t *new;

		/* Missing pud level: allocate a zeroed table page and hook it in. */
		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, new);
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	/*
	 * pmd_present() (not pmd_none()) so an existing block/section
	 * mapping is not silently overwritten with a table entry.
	 */
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!new)
			goto err_alloc;
		pmd_populate_kernel(&init_mm, pmd, new);
	}

	return;

err_alloc:
	/*
	 * Allocation failure this early is fatal.
	 * NOTE(review): all three arguments are PAGE_SIZE, so "from=%lx"
	 * prints the allocation size rather than a source address —
	 * looks copy-pasted from a memblock panic format; confirm intent.
	 */
	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		__func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
}
#endif

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		/*
		 * Always reserve area for module percpu variables.  That's
		 * what the legacy allocator did.
@@ -356,9 +405,22 @@ void __init setup_per_cpu_areas(void)
						PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
						pcpu_cpu_distance,
						pcpu_fc_alloc, pcpu_fc_free);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	if (rc < 0)
		panic("Failed to initialize percpu areas.");
		pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
			pcpu_fc_names[pcpu_chosen_fc], rc);
#endif
	}

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
						pcpu_fc_alloc,
						pcpu_fc_free,
						pcpu_populate_pte);
#endif
	if (rc < 0)
		panic("Failed to initialize percpu areas (err=%d).", rc);
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+9 −1
Original line number Diff line number Diff line
@@ -217,7 +217,10 @@ void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
#else

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);

#else /* CONFIG_KASAN_VMALLOC */
static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
@@ -232,6 +235,11 @@ static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

/* No-op when CONFIG_KASAN_VMALLOC is disabled: no shadow to populate. */
static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }

#endif

#ifdef CONFIG_KASAN
+5 −0
Original line number Diff line number Diff line
@@ -997,4 +997,9 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
				       (unsigned long)shadow_end);
	}
}

/*
 * Default (generic) implementation: do nothing. Declared __weak so an
 * architecture (e.g. arm64) can override it to populate the shadow for
 * early-registered vm areas such as the page-mapped percpu first chunk.
 */
void __init __weak kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }

#endif
Loading