Commit 6e48a966 authored by Matthew Wilcox (Oracle), committed by Vlastimil Babka

mm/kasan: Convert to struct folio and struct slab



KASAN accesses some slab-related struct page fields, so we need to
convert it to struct slab. Some places are simplified a bit thanks to
kasan_addr_to_slab() encapsulating the PageSlab flag check through
virt_to_slab(). When resolving an object address to either a real slab
or a large kmalloc allocation, use struct folio as the intermediate type
for testing the slab flag, to avoid an unnecessary implicit
compound_head().
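
As a hedged illustration (not part of this commit's diff), the
resolution pattern described above looks roughly like this; the head
page is looked up once in virt_to_folio(), and folio_test_slab() then
tests the flag without a further implicit compound_head():

	/* Sketch of the pattern, modelled on __kasan_slab_free_mempool()
	 * below; "ptr" is an arbitrary kmalloc/slab object address. */
	struct folio *folio = virt_to_folio(ptr);

	if (folio_test_slab(folio)) {
		/* A real slab: switch to the slab view of the folio. */
		struct slab *slab = folio_slab(folio);
		/* ... e.g. use slab->slab_cache ... */
	} else {
		/* Large kmalloc that fell back to the page allocator. */
		/* ... e.g. use folio_size(folio) ... */
	}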

[ vbabka@suse.cz: use struct folio, adjust to differences in previous
  patches ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
parent 50757018
include/linux/kasan.h +5 −4
@@ -9,6 +9,7 @@
 
 struct kmem_cache;
 struct page;
+struct slab;
 struct vm_struct;
 struct task_struct;
 
@@ -193,11 +194,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 	return 0;
 }
 
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
 {
 	if (kasan_enabled())
-		__kasan_poison_slab(page);
+		__kasan_poison_slab(slab);
 }
 
 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
@@ -322,7 +323,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
mm/kasan/common.c +13 −10
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
@@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = virt_to_head_page(ptr);
+	folio = virt_to_folio(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
@@ -411,12 +412,14 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!folio_test_slab(folio))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, folio_size(folio), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		struct slab *slab = folio_slab(folio);
+
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }

@@ -560,7 +563,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +575,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
mm/kasan/generic.c +4 −4
@@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 static void __kasan_record_aux_stack(void *addr, bool can_alloc)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(page)))
+	if (is_kfence_address(addr) || !slab)
 		return;
 
-	cache = page->slab_cache;
-	object = nearest_obj(cache, page_slab(page), addr);
+	cache = slab->slab_cache;
+	object = nearest_obj(cache, slab, addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;
mm/kasan/kasan.h +1 −0
@@ -265,6 +265,7 @@ bool kasan_report(unsigned long addr, size_t size,
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
 struct page *kasan_addr_to_page(const void *addr);
+struct slab *kasan_addr_to_slab(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
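
The definition of kasan_addr_to_slab() lands outside the hunks shown
here (presumably beside kasan_addr_to_page() in mm/kasan/report.c). A
minimal sketch consistent with this declaration, assuming virt_to_slab()
returns NULL when the backing page is not a slab:

	struct slab *kasan_addr_to_slab(const void *addr)
	{
		/* Only addresses in the linear map can be a slab object. */
		if (virt_addr_valid(addr))
			return virt_to_slab((void *)addr);
		return NULL;
	}

Folding both the validity and PageSlab checks into one NULL-returning
helper is what lets __kasan_record_aux_stack() above drop its explicit
"page && PageSlab(page)" test.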
mm/kasan/quarantine.c +1 −1
@@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)