Commit 2667e367 authored by Palmer Dabbelt

Merge patch series "RISC-V kasan rework"

Alexandre Ghiti <alexghiti@rivosinc.com> says:

As described in patch 2, our current kasan implementation is intricate, so
I tried to simplify it and mimic what arm64/x86 are doing.

In addition, it fixes the UEFI boot flow with a kasan kernel and kasan
inline instrumentation: all kasan configurations were tested successfully
on a large Ubuntu kernel with KASAN_KUNIT_TEST and KASAN_MODULE_TEST.

inline ubuntu config + uefi:
 sv39: OK
 sv48: OK
 sv57: OK

outline ubuntu config + uefi:
 sv39: OK
 sv48: OK
 sv57: OK

One KASAN_KUNIT_TEST case still always fails and remains to be investigated:
KASAN failure expected in "set_bit(nr, addr)", but none occurred
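
For reference, the check that trips here is the out-of-bounds bitops case
from the generic KASAN KUnit suite. A minimal sketch of its shape, modeled
on mm/kasan/kasan_test.c (the helper name and exact layout below are
illustrative, not the upstream code):

  /* Sketch modeled on the generic KASAN bitops KUnit case. */
  static void kasan_bitops_sketch(struct kunit *test)
  {
  	long *bits = kzalloc(sizeof(*bits), GFP_KERNEL);

  	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

  	/*
  	 * Bit number BITS_PER_LONG lands one long past the allocation,
  	 * so the instrumented set_bit() should trigger a KASAN report;
  	 * the message above means no report was produced on riscv.
  	 */
  	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, (void *)bits));

  	kfree(bits);
  }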

Note that Palmer recently proposed removing COMMAND_LINE_SIZE from the
userspace ABI
https://lore.kernel.org/lkml/20221211061358.28035-1-palmer@rivosinc.com/T/
so that the command line can finally be enlarged to fit all of the kasan
kernel parameters.

All of this should hopefully fix the syzkaller riscv build that has been
failing for a few months now. Any testing is appreciated, and if I can help
in any way, please ask.

* b4-shazam-merge:
  riscv: Unconditionally select KASAN_VMALLOC if KASAN
  riscv: Fix ptdump when KASAN is enabled
  riscv: Fix EFI stub usage of KASAN instrumented strcmp function
  riscv: Move DTB_EARLY_BASE_VA to the kernel address space
  riscv: Rework kasan population functions
  riscv: Split early and final KASAN population functions

Link: https://lore.kernel.org/r/20230203075232.274282-1-alexghiti@rivosinc.com


Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parents 2e75ab31 864046c5
arch/riscv/Kconfig  +1 −0
@@ -120,6 +120,7 @@ config RISCV
	select HAVE_SYSCALL_TRACEPOINTS
	select IRQ_DOMAIN
	select IRQ_FORCED_THREADING
+	select KASAN_VMALLOC if KASAN
	select MODULES_USE_ELF_RELA if MODULES
	select MODULE_SECTIONS if MODULES
	select OF
arch/riscv/kernel/image-vars.h  +0 −2
@@ -23,8 +23,6 @@
 * linked at. The routines below are all implemented in assembler in a
 * position independent manner
 */
-__efistub_strcmp		= strcmp;
-
__efistub__start		= _start;
__efistub__start_kernel		= _start_kernel;
__efistub__end			= _end;
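
Context for the removal above: with KASAN enabled, the kernel's strcmp() is
instrumented, and the EFI stub runs before the shadow memory exists, so the
alias must not be exported to the stub. The stub instead picks up an
uninstrumented fallback from the EFI libstub (drivers/firmware/efi/libstub
is built with KASAN_SANITIZE := n). A sketch of the usual shape of such a
freestanding strcmp, for illustration only (not the exact libstub code):

  /* Illustrative uninstrumented strcmp of the usual freestanding shape. */
  int strcmp(const char *str1, const char *str2)
  {
  	while (*str1 && *str1 == *str2) {
  		str1++;
  		str2++;
  	}
  	return *(const unsigned char *)str1 - *(const unsigned char *)str2;
  }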
arch/riscv/mm/init.c  +1 −1
@@ -57,7 +57,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
-#define DTB_EARLY_BASE_VA      PGDIR_SIZE
+#define DTB_EARLY_BASE_VA      (ADDRESS_SPACE_END - (PTRS_PER_PGD / 2 * PGDIR_SIZE) + 1)
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

arch/riscv/mm/kasan_init.c  +284 −232
@@ -18,58 +18,48 @@
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
- * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
- * must be divided as follows:
- * - the first PGD entry, although incomplete, is populated with
- *   kasan_early_shadow_pud/p4d
- * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
- * - the last PGD entry is shared with the kernel mapping so populated at the
- *   lower levels pud/p4d
- *
- * In addition, when shallow populating a kasan region (for example vmalloc),
- * this region may also not be aligned on PGDIR size, so we must go down to the
- * pud level too.
+ * For sv48 and sv57, the region start is aligned on PGDIR_SIZE whereas the end
+ * region is not and then we have to go down to the PUD level.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];
+pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
+pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;

static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
-	pte_t *ptep, *base_pte;
+	pte_t *ptep, *p;

-	if (pmd_none(*pmd))
-		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
-	else
-		base_pte = (pte_t *)pmd_page_vaddr(*pmd);
+	if (pmd_none(*pmd)) {
+		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
+	}

-	ptep = base_pte + pte_index(vaddr);
+	ptep = pte_offset_kernel(pmd, vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);
-
-	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
-	pmd_t *pmdp, *base_pmd;
+	pmd_t *pmdp, *p;
	unsigned long next;

	if (pud_none(*pud)) {
-		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
-	} else {
-		base_pmd = (pmd_t *)pud_pgtable(*pud);
-		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
-			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

-	pmdp = base_pmd + pmd_index(vaddr);
+	pmdp = pmd_offset(pud, vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
@@ -78,157 +68,77 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PMD_SIZE);
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);
-
-	/*
-	 * Wait for the whole PGD to be populated before setting the PGD in
-	 * the page table, otherwise, if we did set the PGD before populating
-	 * it entirely, memblock could allocate a page at a physical address
-	 * where KASAN is not populated yet and then we'd get a page fault.
-	 */
-	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

-static void __init kasan_populate_pud(pgd_t *pgd,
-				      unsigned long vaddr, unsigned long end,
-				      bool early)
+static void __init kasan_populate_pud(p4d_t *p4d,
+				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
-	pud_t *pudp, *base_pud;
+	pud_t *pudp, *p;
	unsigned long next;

-	if (early) {
-		/*
-		 * We can't use pgd_page_vaddr here as it would return a linear
-		 * mapping address but it is not mapped yet, but when populating
-		 * early_pg_dir, we need the physical address and when populating
-		 * swapper_pg_dir, we need the kernel virtual address so use
-		 * pt_ops facility.
-		 */
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
-	} else if (pgd_none(*pgd)) {
-		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
-		memcpy(base_pud, (void *)kasan_early_shadow_pud,
-			sizeof(pud_t) * PTRS_PER_PUD);
-	} else {
-		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
-		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
-			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
-			memcpy(base_pud, (void *)kasan_early_shadow_pud,
-			       sizeof(pud_t) * PTRS_PER_PUD);
-		}
+	if (p4d_none(*p4d)) {
+		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

-	pudp = base_pud + pud_index(vaddr);
+	pudp = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
-			if (early) {
-				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
-				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
-				continue;
-			} else {
			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
			if (phys_addr) {
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);
				continue;
			}
-		}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);
-
-	/*
-	 * Wait for the whole PGD to be populated before setting the PGD in
-	 * the page table, otherwise, if we did set the PGD before populating
-	 * it entirely, memblock could allocate a page at a physical address
-	 * where KASAN is not populated yet and then we'd get a page fault.
-	 */
-	if (!early)
-		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

static void __init kasan_populate_p4d(pgd_t *pgd,
-				      unsigned long vaddr, unsigned long end,
-				      bool early)
+				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
-	p4d_t *p4dp, *base_p4d;
+	p4d_t *p4dp, *p;
	unsigned long next;

-	if (early) {
-		/*
-		 * We can't use pgd_page_vaddr here as it would return a linear
-		 * mapping address but it is not mapped yet, but when populating
-		 * early_pg_dir, we need the physical address and when populating
-		 * swapper_pg_dir, we need the kernel virtual address so use
-		 * pt_ops facility.
-		 */
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
-	} else {
-		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
-			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
-			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
-				sizeof(p4d_t) * PTRS_PER_P4D);
-		}
+	if (pgd_none(*pgd)) {
+		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
+		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
	}

-	p4dp = base_p4d + p4d_index(vaddr);
+	p4dp = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);

		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
-			if (early) {
-				phys_addr = __pa(((uintptr_t)kasan_early_shadow_pud));
-				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
-				continue;
-			} else {
			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
			if (phys_addr) {
				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, P4D_SIZE);
				continue;
			}
-		}
		}

-		kasan_populate_pud((pgd_t *)p4dp, vaddr, next, early);
+		kasan_populate_pud(p4dp, vaddr, next);
	} while (p4dp++, vaddr = next, vaddr != end);
-
-	/*
-	 * Wait for the whole P4D to be populated before setting the P4D in
-	 * the page table, otherwise, if we did set the P4D before populating
-	 * it entirely, memblock could allocate a page at a physical address
-	 * where KASAN is not populated yet and then we'd get a page fault.
-	 */
-	if (!early)
-		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_p4d)), PAGE_TABLE));
-}
-
-#define kasan_early_shadow_pgd_next			(pgtable_l5_enabled ?	\
-				(uintptr_t)kasan_early_shadow_p4d :		\
-							(pgtable_l4_enabled ?	\
-				(uintptr_t)kasan_early_shadow_pud :		\
-				(uintptr_t)kasan_early_shadow_pmd))
-#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
-		(pgtable_l5_enabled ?						\
-		kasan_populate_p4d(pgdp, vaddr, next, early) :			\
-		(pgtable_l4_enabled ?						\
-			kasan_populate_pud(pgdp, vaddr, next, early) :		\
-			kasan_populate_pmd((pud_t *)pgdp, vaddr, next)))
}

static void __init kasan_populate_pgd(pgd_t *pgdp,
-				      unsigned long vaddr, unsigned long end,
-				      bool early)
+				      unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	unsigned long next;
@@ -236,29 +146,174 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
	do {
		next = pgd_addr_end(vaddr, end);

-		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
-			if (early) {
-				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
-				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
-				continue;
-			} else if (pgd_page_vaddr(*pgdp) ==
-				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
-				/*
-				 * pgdp can't be none since kasan_early_init
-				 * initialized all KASAN shadow region with
-				 * kasan_early_shadow_pud: if this is still the
-				 * case, that means we can try to allocate a
-				 * hugepage as a replacement.
-				 */
+		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		    (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				memset(__va(phys_addr), KASAN_SHADOW_INIT, PGDIR_SIZE);
				continue;
			}
		}

+		kasan_populate_p4d(pgdp, vaddr, next);
+	} while (pgdp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_clear_pud(p4d_t *p4dp,
+					 unsigned long vaddr, unsigned long end)
+{
+	pud_t *pudp, *base_pud;
+	unsigned long next;
+
+	if (!pgtable_l4_enabled) {
+		pudp = (pud_t *)p4dp;
+	} else {
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		pudp = base_pud + pud_index(vaddr);
+	}
+
+	do {
+		next = pud_addr_end(vaddr, end);
+
+		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+			pud_clear(pudp);
+			continue;
+		}
+
+		BUG();
+	} while (pudp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_clear_p4d(pgd_t *pgdp,
+					 unsigned long vaddr, unsigned long end)
+{
+	p4d_t *p4dp, *base_p4d;
+	unsigned long next;
+
+	if (!pgtable_l5_enabled) {
+		p4dp = (p4d_t *)pgdp;
+	} else {
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		p4dp = base_p4d + p4d_index(vaddr);
+	}
+
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		if (pgtable_l4_enabled && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		    (next - vaddr) >= P4D_SIZE) {
+			p4d_clear(p4dp);
+			continue;
+		}
+
+		kasan_early_clear_pud(p4dp, vaddr, next);
+	} while (p4dp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_clear_pgd(pgd_t *pgdp,
+					 unsigned long vaddr, unsigned long end)
+{
+	unsigned long next;
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+
+		if (pgtable_l5_enabled && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		    (next - vaddr) >= PGDIR_SIZE) {
+			pgd_clear(pgdp);
+			continue;
+		}
+
+		kasan_early_clear_p4d(pgdp, vaddr, next);
+	} while (pgdp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_populate_pud(p4d_t *p4dp,
+					    unsigned long vaddr,
+					    unsigned long end)
+{
+	pud_t *pudp, *base_pud;
+	phys_addr_t phys_addr;
+	unsigned long next;
+
+	if (!pgtable_l4_enabled) {
+		pudp = (pud_t *)p4dp;
+	} else {
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		pudp = base_pud + pud_index(vaddr);
+	}
+
+	do {
+		next = pud_addr_end(vaddr, end);
+
+		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		    (next - vaddr) >= PUD_SIZE) {
+			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
+			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
+			continue;
+		}
+
+		BUG();
+	} while (pudp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_populate_p4d(pgd_t *pgdp,
+					    unsigned long vaddr,
+					    unsigned long end)
+{
+	p4d_t *p4dp, *base_p4d;
+	phys_addr_t phys_addr;
+	unsigned long next;
+
+	/*
+	 * We can't use pgd_page_vaddr here as it would return a linear
+	 * mapping address but it is not mapped yet, but when populating
+	 * early_pg_dir, we need the physical address and when populating
+	 * swapper_pg_dir, we need the kernel virtual address so use
+	 * pt_ops facility.
+	 * Note that this test is then completely equivalent to
+	 * p4dp = p4d_offset(pgdp, vaddr)
+	 */
+	if (!pgtable_l5_enabled) {
+		p4dp = (p4d_t *)pgdp;
+	} else {
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		p4dp = base_p4d + p4d_index(vaddr);
+	}
+
-		kasan_populate_pgd_next(pgdp, vaddr, next, early);
+	do {
+		next = p4d_addr_end(vaddr, end);
+
+		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		    (next - vaddr) >= P4D_SIZE) {
+			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
+			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
+			continue;
+		}
+
+		kasan_early_populate_pud(p4dp, vaddr, next);
+	} while (p4dp++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_early_populate_pgd(pgd_t *pgdp,
+					    unsigned long vaddr,
+					    unsigned long end)
+{
+	phys_addr_t phys_addr;
+	unsigned long next;
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+
+		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		    (next - vaddr) >= PGDIR_SIZE) {
+			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
+			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
+			continue;
+		}
+
+		kasan_early_populate_p4d(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}

@@ -295,16 +350,16 @@ asmlinkage void __init kasan_early_init(void)
					PAGE_TABLE));
	}

-	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
-			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+	kasan_early_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
+				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}

void __init kasan_swapper_init(void)
{
-	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
-			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+	kasan_early_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
+				 KASAN_SHADOW_START, KASAN_SHADOW_END);

	local_flush_tlb_all();
}
@@ -314,118 +369,65 @@ static void __init kasan_populate(void *start, void *end)
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

-	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
-
-	local_flush_tlb_all();
-	memset(start, KASAN_SHADOW_INIT, end - start);
+	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend);
}

-static void __init kasan_shallow_populate_pmd(pgd_t *pgdp,
+static void __init kasan_shallow_populate_pud(p4d_t *p4d,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
-	pmd_t *pmdp, *base_pmd;
-	bool is_kasan_pte;
-
-	base_pmd = (pmd_t *)pgd_page_vaddr(*pgdp);
-	pmdp = base_pmd + pmd_index(vaddr);

-	do {
-		next = pmd_addr_end(vaddr, end);
-		is_kasan_pte = (pmd_pgtable(*pmdp) == lm_alias(kasan_early_shadow_pte));
-
-		if (is_kasan_pte)
-			pmd_clear(pmdp);
-	} while (pmdp++, vaddr = next, vaddr != end);
-}
-
-static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
-					      unsigned long vaddr, unsigned long end)
-{
-	unsigned long next;
-	pud_t *pudp, *base_pud;
-	pmd_t *base_pmd;
-	bool is_kasan_pmd;
-
-	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
-	pudp = base_pud + pud_index(vaddr);
+	void *p;
+	pud_t *pud_k = pud_offset(p4d, vaddr);

	do {
		next = pud_addr_end(vaddr, end);
-		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

-		if (!is_kasan_pmd)
-			continue;
-
-		base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-		set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
-
-		if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE)
+		if (pud_none(*pud_k)) {
+			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
+		}

-		memcpy(base_pmd, (void *)kasan_early_shadow_pmd, PAGE_SIZE);
-		kasan_shallow_populate_pmd((pgd_t *)pudp, vaddr, next);
-	} while (pudp++, vaddr = next, vaddr != end);
+		BUG();
+	} while (pud_k++, vaddr = next, vaddr != end);
}

-static void __init kasan_shallow_populate_p4d(pgd_t *pgdp,
+static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
					      unsigned long vaddr, unsigned long end)
{
	unsigned long next;
-	p4d_t *p4dp, *base_p4d;
-	pud_t *base_pud;
-	bool is_kasan_pud;
-
-	base_p4d = (p4d_t *)pgd_page_vaddr(*pgdp);
-	p4dp = base_p4d + p4d_index(vaddr);
+	void *p;
+	p4d_t *p4d_k = p4d_offset(pgd, vaddr);

	do {
		next = p4d_addr_end(vaddr, end);
-		is_kasan_pud = (p4d_pgtable(*p4dp) == lm_alias(kasan_early_shadow_pud));

-		if (!is_kasan_pud)
-			continue;
-
-		base_pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-		set_p4d(p4dp, pfn_p4d(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
-
-		if (IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE)
+		if (p4d_none(*p4d_k)) {
+			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
			continue;
-
-		memcpy(base_pud, (void *)kasan_early_shadow_pud, PAGE_SIZE);
-		kasan_shallow_populate_pud((pgd_t *)p4dp, vaddr, next);
-	} while (p4dp++, vaddr = next, vaddr != end);
+		}

-#define kasan_shallow_populate_pgd_next(pgdp, vaddr, next)			\
-		(pgtable_l5_enabled ?						\
-		kasan_shallow_populate_p4d(pgdp, vaddr, next) :			\
-		(pgtable_l4_enabled ?						\
-		kasan_shallow_populate_pud(pgdp, vaddr, next) :			\
-		kasan_shallow_populate_pmd(pgdp, vaddr, next)))
+		kasan_shallow_populate_pud(p4d_k, vaddr, end);
+	} while (p4d_k++, vaddr = next, vaddr != end);
}

static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
-	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
-		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
-				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

-		if (is_kasan_pgd_next) {
+		if (pgd_none(*pgd_k)) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
-		}
-
-		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;
+		}

-		memcpy(p, (void *)kasan_early_shadow_pgd_next, PAGE_SIZE);
-		kasan_shallow_populate_pgd_next(pgd_k, vaddr, next);
+		kasan_shallow_populate_p4d(pgd_k, vaddr, next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

@@ -435,7 +437,37 @@ static void __init kasan_shallow_populate(void *start, void *end)
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
-	local_flush_tlb_all();
}

+static void create_tmp_mapping(void)
+{
+	void *ptr;
+	p4d_t *base_p4d;
+
+	/*
+	 * We need to clean the early mapping: this is hard to achieve "in-place",
+	 * so install a temporary mapping like arm64 and x86 do.
+	 */
+	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(pgd_t) * PTRS_PER_PGD);
+
+	/* Copy the last p4d since it is shared with the kernel mapping. */
+	if (pgtable_l5_enabled) {
+		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
+		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
+			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
+		base_p4d = tmp_p4d;
+	} else {
+		base_p4d = (p4d_t *)tmp_pg_dir;
+	}
+
+	/* Copy the last pud since it is shared with the kernel mapping. */
+	if (pgtable_l4_enabled) {
+		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
+		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
+			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
+	}
+}
+
void __init kasan_init(void)
@@ -443,10 +475,27 @@ void __init kasan_init(void)
	phys_addr_t p_start, p_end;
	u64 i;

-	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+	create_tmp_mapping();
+	csr_write(CSR_SATP, PFN_DOWN(__pa(tmp_pg_dir)) | satp_mode);
+
+	kasan_early_clear_pgd(pgd_offset_k(KASAN_SHADOW_START),
+			      KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+	kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)FIXADDR_START),
+				    (void *)kasan_mem_to_shadow((void *)VMALLOC_START));
+
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+		/* Shallow populate modules and BPF which are vmalloc-allocated */
+		kasan_shallow_populate(
+			(void *)kasan_mem_to_shadow((void *)MODULES_VADDR),
+			(void *)kasan_mem_to_shadow((void *)MODULES_END));
+	} else {
+		kasan_populate_early_shadow((void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+					    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+	}

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
@@ -459,8 +508,8 @@ void __init kasan_init(void)
		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

-	/* Populate kernel, BPF, modules mapping */
-	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
+	/* Populate kernel */
+	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_END),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
@@ -471,4 +520,7 @@ void __init kasan_init(void)

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
+
+	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
+	local_flush_tlb_all();
}
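
A reminder of the shadow mapping that all of the population loops above
serve: generic KASAN maps every 8 bytes of address space to one shadow
byte. A sketch mirroring the generic kasan_mem_to_shadow() helper (the
shift is 3 in generic mode; KASAN_SHADOW_OFFSET is taken from the kernel
configuration and assumed to be defined here):

  #define KASAN_SHADOW_SCALE_SHIFT	3	/* 8 bytes of memory per shadow byte */

  /* Sketch of the generic address-to-shadow translation. */
  static inline void *kasan_mem_to_shadow_sketch(const void *addr)
  {
  	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
  		+ KASAN_SHADOW_OFFSET;
  }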
arch/riscv/mm/ptdump.c  +12 −12
@@ -59,10 +59,6 @@ struct ptd_mm_info {
};

enum address_markers_idx {
-#ifdef CONFIG_KASAN
-	KASAN_SHADOW_START_NR,
-	KASAN_SHADOW_END_NR,
-#endif
	FIXMAP_START_NR,
	FIXMAP_END_NR,
	PCI_IO_START_NR,
@@ -74,6 +70,10 @@ enum address_markers_idx {
	VMALLOC_START_NR,
	VMALLOC_END_NR,
	PAGE_OFFSET_NR,
+#ifdef CONFIG_KASAN
+	KASAN_SHADOW_START_NR,
+	KASAN_SHADOW_END_NR,
+#endif
#ifdef CONFIG_64BIT
	MODULES_MAPPING_NR,
	KERNEL_MAPPING_NR,
@@ -82,10 +82,6 @@ enum address_markers_idx {
};

static struct addr_marker address_markers[] = {
-#ifdef CONFIG_KASAN
-	{0, "Kasan shadow start"},
-	{0, "Kasan shadow end"},
-#endif
	{0, "Fixmap start"},
	{0, "Fixmap end"},
	{0, "PCI I/O start"},
@@ -97,6 +93,10 @@ static struct addr_marker address_markers[] = {
	{0, "vmalloc() area"},
	{0, "vmalloc() end"},
	{0, "Linear mapping"},
+#ifdef CONFIG_KASAN
+	{0, "Kasan shadow start"},
+	{0, "Kasan shadow end"},
+#endif
#ifdef CONFIG_64BIT
	{0, "Modules/BPF mapping"},
	{0, "Kernel mapping"},
@@ -362,10 +362,6 @@ static int __init ptdump_init(void)
{
	unsigned int i, j;

-#ifdef CONFIG_KASAN
-	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
-	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
-#endif
	address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
	address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
	address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
@@ -377,6 +373,10 @@ static int __init ptdump_init(void)
	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
	address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
	address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
+#ifdef CONFIG_KASAN
+	address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+	address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
#ifdef CONFIG_64BIT
	address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
	address_markers[KERNEL_MAPPING_NR].start_address = kernel_map.virt_addr;