Commit 3e826100 authored by Alexander Gordeev's avatar Alexander Gordeev
Browse files

s390/kasan: avoid short by one page shadow memory



Kernel Address Sanitizer uses 3 bits per byte to
encode memory. That is the number of bits the start
and end addresses of a memory range are shifted right
when the corresponding shadow memory is created for
that memory range.

The used memory mapping routine expects page-aligned
addresses, while the above described 3-bit shift might
turn the shadow memory range start and end boundaries
into non-page-aligned in case the size of the original
memory range is less than (PAGE_SIZE << 3). As a result,
the resulting shadow memory range could be short by one
page.

Align on page boundary the start and end addresses when
mapping a shadow memory range and avoid the described
issue in the future.

Note that this does not fix a real problem, since currently
no virtual regions of size less than (PAGE_SIZE << 3)
exist.

Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent 2ed8b509
Loading
Loading
Loading
Loading
+11 −4
Original line number Diff line number Diff line
@@ -45,6 +45,13 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat

static pte_t pte_z;

/*
 * Populate KASAN shadow page tables for the memory range [start, end).
 *
 * __sha() converts an address to its shadow address by shifting it right
 * (3 bits per the commit description), so the resulting shadow boundaries
 * may be non-page-aligned when the original range is smaller than
 * (PAGE_SIZE << 3). Round start down and end up to page boundaries so
 * that pgtable_populate(), which expects page-aligned addresses, maps
 * the entire shadow range rather than falling short by one page.
 */
static inline void kasan_populate(unsigned long start, unsigned long end, enum populate_mode mode)
{
	start = PAGE_ALIGN_DOWN(__sha(start));
	end = PAGE_ALIGN(__sha(end));
	pgtable_populate(start, end, mode);
}

static void kasan_populate_shadow(void)
{
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
@@ -95,17 +102,17 @@ static void kasan_populate_shadow(void)
	 */

	for_each_physmem_usable_range(i, &start, &end)
		pgtable_populate(__sha(start), __sha(end), POPULATE_KASAN_MAP_SHADOW);
		kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_end = VMALLOC_START;
		/* shallowly populate kasan shadow for vmalloc and modules */
		pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END), POPULATE_KASAN_SHALLOW);
		kasan_populate(VMALLOC_START, MODULES_END, POPULATE_KASAN_SHALLOW);
	} else {
		untracked_end = MODULES_VADDR;
	}
	/* populate kasan shadow for untracked memory */
	pgtable_populate(__sha(ident_map_size), __sha(untracked_end), POPULATE_KASAN_ZERO_SHADOW);
	pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE), POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(ident_map_size, untracked_end, POPULATE_KASAN_ZERO_SHADOW);
	kasan_populate(MODULES_END, _REGION1_SIZE, POPULATE_KASAN_ZERO_SHADOW);
}

static bool kasan_pgd_populate_zero_shadow(pgd_t *pgd, unsigned long addr,