Commit d12d9ad8 authored by Andrey Konovalov, committed by Linus Torvalds

kasan, mm: optimize krealloc poisoning

Currently, krealloc() always calls ksize(), which unpoisons the whole
object including the redzone.  This is inefficient, as kasan_krealloc()
repoisons the redzone for objects that fit into the same buffer.

This patch changes the krealloc() instrumentation to use the uninstrumented
__ksize(), which doesn't unpoison the memory.  Instead, kasan_krealloc() is
changed to unpoison the memory excluding the redzone.
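
For reference, ksize() is roughly the following at this point in the tree
(a simplified sketch reconstructed from context, not a verbatim quote),
which is why every ksize() call unpoisons the whole allocation:

size_t ksize(const void *objp)
{
        size_t size;

        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
                return 0;

        size = kfence_ksize(objp) ?: __ksize(objp);
        /* Callers may use the whole allocated area, so unpoison all of it. */
        kasan_unpoison_range(objp, size);
        return size;
}

krealloc() doesn't need that side effect, since kasan_krealloc()
immediately establishes the precise poisoning for the requested size.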

For objects that don't fit into the old allocation, this patch disables
KASAN accessibility checks when copying memory into the new object instead
of unpoisoning the old one.
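
As a concrete illustration of the fitting-object path (a minimal
hypothetical example, not code from this patch; the sizes and the function
name are made up):

#include <linux/slab.h>

/* Hypothetical caller: shrink a 100-byte allocation to 32 bytes. */
static void krealloc_shrink_example(void)
{
        /* Served from kmalloc-128: bytes 0..99 unpoisoned, 100..127 redzone. */
        char *p = kmalloc(100, GFP_KERNEL);

        if (!p)
                return;
        /*
         * __ksize() reports 128 >= 32, so the same object is reused.
         * Before this patch: ksize() unpoisoned all 128 bytes, redzone
         * included, and kasan_krealloc() then repoisoned bytes 32..127.
         * After this patch: kasan_krealloc() unpoisons bytes 0..31 and
         * poisons bytes 32..127 in a single pass.
         */
        p = krealloc(p, 32, GFP_KERNEL);
        kfree(p);
}

The resulting poisoning state is identical either way; the patch only drops
the redundant unpoison/repoison round-trip, which is comparatively expensive
for hardware tag-based KASAN, where poisoning means writing memory tags.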

Link: https://lkml.kernel.org/r/9bef90327c9cb109d736c40115684fd32f49e6b0.1612546384.git.andreyknvl@google.com


Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Marco Elver <elver@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Branislav Rankov <Branislav.Rankov@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Peter Collingbourne <pcc@google.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26a5ca7a
mm/kasan/common.c +10 −2
@@ -476,7 +476,7 @@ static void *____kasan_kmalloc(struct kmem_cache *cache, const void *object,
 
 	/*
 	 * The object has already been unpoisoned by kasan_slab_alloc() for
-	 * kmalloc() or by ksize() for krealloc().
+	 * kmalloc() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -526,7 +526,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 	/*
 	 * The object has already been unpoisoned by kasan_alloc_pages() for
-	 * alloc_pages() or by ksize() for krealloc().
+	 * alloc_pages() or by kasan_krealloc() for krealloc().
 	 */
 
 	/*
@@ -554,8 +554,16 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
 
+	/*
+	 * Unpoison the object's data.
+	 * Part of it might already have been unpoisoned, but it's unknown
+	 * how big that part is.
+	 */
+	kasan_unpoison(object, size);
+
 	page = virt_to_head_page(object);
 
+	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
 	if (unlikely(!PageSlab(page)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
mm/slab_common.c +14 −6
@@ -1136,19 +1136,27 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 	void *ret;
 	size_t ks;
 
-	if (likely(!ZERO_OR_NULL_PTR(p)) && !kasan_check_byte(p))
-		return NULL;
-
-	ks = ksize(p);
+	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
+	if (likely(!ZERO_OR_NULL_PTR(p))) {
+		if (!kasan_check_byte(p))
+			return NULL;
+		ks = kfence_ksize(p) ?: __ksize(p);
+	} else
+		ks = 0;
 
+	/* If the object still fits, repoison it precisely. */
 	if (ks >= new_size) {
 		p = kasan_krealloc((void *)p, new_size, flags);
 		return (void *)p;
 	}
 
 	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
+	if (ret && p) {
+		/* Disable KASAN checks as the object's redzone is accessed. */
+		kasan_disable_current();
+		memcpy(ret, kasan_reset_tag(p), ks);
+		kasan_enable_current();
+	}
 
 	return ret;
 }
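
The memcpy() above deliberately copies ks bytes, which for a slab object is
the full usable size of the allocation and can extend past the originally
requested (and unpoisoned) size into the KASAN redzone; hence the checks
are suppressed around the copy. The same bracketing pattern works for any
intentional access to poisoned memory (a generic sketch, not code from this
patch; the helper name is hypothetical):

#include <linux/kasan.h>
#include <linux/string.h>

/* Hypothetical helper: copy a region that may overlap poisoned bytes. */
static void copy_including_redzone(void *dst, const void *src, size_t len)
{
        /* Suppress software KASAN reports for this intentional access... */
        kasan_disable_current();
        /*
         * ...and drop the pointer tag so hardware tag-based KASAN doesn't
         * fault on a tag mismatch against the redzone's memory tags.
         */
        memcpy(dst, kasan_reset_tag(src), len);
        kasan_enable_current();
}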