Commit cde8a7eb authored by Andrey Konovalov, committed by Linus Torvalds

kasan: ensure poisoning size alignment

A previous change, d99f6a10 ("kasan: don't round_up too much"), attempted
to simplify the code by adding a round_up(size) call to kasan_poison().
While this leaves fewer round_up() calls around the code, it results in
round_up() being called multiple times along some call paths.

This patch removes the round_up() of size from kasan_poison() and ensures
that all callers round_up() the size explicitly.  It also adds WARN_ON()
alignment checks for address and size to kasan_poison() and
kasan_unpoison().

Link: https://lkml.kernel.org/r/3ffe8d4a246ae67a8b5e91f65bf98cd7cba9d7b9.1612546384.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d12d9ad8
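
Since the entire patch revolves around granule alignment, a short refresher on
the helpers involved may help: round_up() rounds a value up to the next
multiple of a power-of-two boundary, and masking with KASAN_GRANULE_MASK
extracts the unaligned remainder. The minimal userspace sketch below
illustrates the contract the patch enforces; the round_up() macro mirrors the
kernel's definition, the 8-byte granule assumes the generic mode (the HW_TAGS
mode uses a 16-byte MTE granule), and the program is illustrative rather than
actual kernel code.

#include <stdio.h>
#include <stddef.h>

/* Mirrors the kernel's round_up() for a power-of-two boundary y. */
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)

/* Generic KASAN granule; the HW_TAGS mode uses 16 bytes instead. */
#define KASAN_GRANULE_SIZE	8UL
#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

int main(void)
{
	size_t object_size = 13;	/* illustrative object size */

	/* After this patch, callers align the size before poisoning. */
	size_t aligned = round_up(object_size, KASAN_GRANULE_SIZE);
	printf("object_size=%zu aligned=%zu\n", object_size, aligned);
	/* Prints: object_size=13 aligned=16 */

	/* kasan_poison() now WARNs and bails out on an unaligned size. */
	printf("unaligned tail=%zu\n", object_size & KASAN_GRANULE_MASK);
	/* Prints: unaligned tail=5 */
	return 0;
}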
mm/kasan/common.c (+6 −3)
@@ -261,7 +261,8 @@ void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 
 void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
-	kasan_poison(object, cache->object_size, KASAN_KMALLOC_REDZONE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+			KASAN_KMALLOC_REDZONE);
 }
 
 /*
@@ -348,7 +349,8 @@ static bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 	}
 
-	kasan_poison(object, cache->object_size, KASAN_KMALLOC_FREE);
+	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
+			KASAN_KMALLOC_FREE);
 
 	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine))
 		return false;
@@ -490,7 +492,8 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 	/* Poison the aligned part of the redzone. */
 	redzone_start = round_up((unsigned long)(object + size),
 				KASAN_GRANULE_SIZE);
-	redzone_end = (unsigned long)object + cache->object_size;
+	redzone_end = round_up((unsigned long)(object + cache->object_size),
+				KASAN_GRANULE_SIZE);
 	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
 			   KASAN_KMALLOC_REDZONE);
mm/kasan/kasan.h (+20 −13)
@@ -330,30 +330,37 @@ static inline u8 kasan_random_tag(void) { return 0; }
 
 #ifdef CONFIG_KASAN_HW_TAGS
 
-static inline void kasan_poison(const void *address, size_t size, u8 value)
+static inline void kasan_poison(const void *addr, size_t size, u8 value)
 {
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	hw_set_mem_tag_range((void *)address,
-			round_up(size, KASAN_GRANULE_SIZE), value);
+	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
+		return;
+	if (WARN_ON(size & KASAN_GRANULE_MASK))
+		return;
+
+	hw_set_mem_tag_range((void *)addr, size, value);
 }
 
-static inline void kasan_unpoison(const void *address, size_t size)
+static inline void kasan_unpoison(const void *addr, size_t size)
 {
-	u8 tag = get_tag(address);
+	u8 tag = get_tag(addr);
 
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	hw_set_mem_tag_range((void *)address,
-			round_up(size, KASAN_GRANULE_SIZE), tag);
+	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
+		return;
+	size = round_up(size, KASAN_GRANULE_SIZE);
+
+	hw_set_mem_tag_range((void *)addr, size, tag);
 }
 
 static inline bool kasan_byte_accessible(const void *addr)
@@ -370,7 +377,7 @@ static inline bool kasan_byte_accessible(const void *addr)
 /**
  * kasan_poison - mark the memory range as unaccessible
  * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @size - range size, must be aligned to KASAN_GRANULE_SIZE
  * @value - value that's written to metadata for the range
  *
  * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
@@ -380,7 +387,7 @@ void kasan_poison(const void *addr, size_t size, u8 value);
 /**
  * kasan_unpoison - mark the memory range as accessible
  * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE
- * @size - range size
+ * @size - range size, can be unaligned
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
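
The kernel-doc updates above encode an asymmetric contract: kasan_poison()
now requires both the address and the size to be granule-aligned, while
kasan_unpoison() requires only an aligned address and rounds the size up
internally. A hypothetical caller pair, written in the same style as this
file but purely as a sketch (the helpers and the KASAN_KMALLOC_REDZONE value
come from this patch; the example functions themselves are not kernel code),
might look like:

/* Sketch only: assumes the declarations in this header. */
static void example_poison(void *object, size_t size)
{
	/* Poisoning: the caller must round the size up itself. */
	kasan_poison(object, round_up(size, KASAN_GRANULE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

static void example_unpoison(void *object, size_t size)
{
	/* Unpoisoning: an unaligned size is fine; it is rounded inside. */
	kasan_unpoison(object, size);
}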
mm/kasan/shadow.c (+22 −15)
@@ -69,7 +69,7 @@ void *memcpy(void *dest, const void *src, size_t len)
 	return __memcpy(dest, src, len);
 }
 
-void kasan_poison(const void *address, size_t size, u8 value)
+void kasan_poison(const void *addr, size_t size, u8 value)
 {
 	void *shadow_start, *shadow_end;
 
@@ -78,55 +78,62 @@ void kasan_poison(const void *address, size_t size, u8 value)
 	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/* Skip KFENCE memory if called explicitly outside of sl*b. */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	size = round_up(size, KASAN_GRANULE_SIZE);
-	shadow_start = kasan_mem_to_shadow(address);
-	shadow_end = kasan_mem_to_shadow(address + size);
+	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
+		return;
+	if (WARN_ON(size & KASAN_GRANULE_MASK))
+		return;
+
+	shadow_start = kasan_mem_to_shadow(addr);
+	shadow_end = kasan_mem_to_shadow(addr + size);
 
 	__memset(shadow_start, value, shadow_end - shadow_start);
 }
 EXPORT_SYMBOL(kasan_poison);
 
 #ifdef CONFIG_KASAN_GENERIC
-void kasan_poison_last_granule(const void *address, size_t size)
+void kasan_poison_last_granule(const void *addr, size_t size)
 {
 	if (size & KASAN_GRANULE_MASK) {
-		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
 		*shadow = size & KASAN_GRANULE_MASK;
 	}
 }
 #endif
 
-void kasan_unpoison(const void *address, size_t size)
+void kasan_unpoison(const void *addr, size_t size)
 {
-	u8 tag = get_tag(address);
+	u8 tag = get_tag(addr);
 
 	/*
 	 * Perform shadow offset calculation based on untagged address, as
 	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
 	 * addresses to this function.
 	 */
-	address = kasan_reset_tag(address);
+	addr = kasan_reset_tag(addr);
 
 	/*
 	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
 	 * that calls to ksize(), where size is not a multiple of machine-word
 	 * size, would otherwise poison the invalid portion of the word.
 	 */
-	if (is_kfence_address(address))
+	if (is_kfence_address(addr))
 		return;
 
-	/* Unpoison round_up(size, KASAN_GRANULE_SIZE) bytes. */
-	kasan_poison(address, size, tag);
+	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
+		return;
+
+	/* Unpoison all granules that cover the object. */
+	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag);
 
 	/* Partially poison the last granule for the generic mode. */
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
-		kasan_poison_last_granule(address, size);
+		kasan_poison_last_granule(addr, size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
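
For the generic mode, the net effect of the new kasan_unpoison() path above
can be modeled in userspace: all granules covering the object are unpoisoned
first, then kasan_poison_last_granule() records in the final shadow byte how
many leading bytes of that granule are valid. The sketch below is a
simplified model (one shadow byte per 8-byte granule, 0xFF standing in for a
poison value), not the real shadow implementation.

#include <stdio.h>
#include <string.h>

#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)
#define KASAN_GRANULE_SIZE	8UL
#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

/* One shadow byte per granule; 0 means the granule is fully accessible. */
static unsigned char shadow[4];

static void model_unpoison(size_t offset, size_t size)
{
	/* Unpoison all granules that cover the object. */
	size_t aligned = round_up(size, KASAN_GRANULE_SIZE);
	memset(shadow + offset / KASAN_GRANULE_SIZE, 0,
	       aligned / KASAN_GRANULE_SIZE);

	/*
	 * Partially poison the last granule, as kasan_poison_last_granule()
	 * does: its shadow byte holds the count of valid leading bytes.
	 */
	if (size & KASAN_GRANULE_MASK)
		shadow[(offset + size) / KASAN_GRANULE_SIZE] =
			size & KASAN_GRANULE_MASK;
}

int main(void)
{
	memset(shadow, 0xFF, sizeof(shadow));	/* everything starts poisoned */
	model_unpoison(0, 13);

	/* Prints 0x00 (8 valid bytes), 0x05 (5 valid bytes), 0xff, 0xff. */
	for (size_t i = 0; i < sizeof(shadow); i++)
		printf("shadow[%zu] = 0x%02x\n", i, shadow[i]);
	return 0;
}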