Commit a32d654d authored by Alexander Popov, committed by Linus Torvalds
Browse files

mm/slab: perform init_on_free earlier

Currently in CONFIG_SLAB, init_on_free happens too late, and heap objects
go to the heap quarantine without being erased.

Let's move the init_on_free clearing before calling kasan_slab_free().  In
that case the heap quarantine will store erased objects, similarly to the
CONFIG_SLUB=y behavior.

Link: https://lkml.kernel.org/r/20201210183729.1261524-1-alex.popov@linux.com


Signed-off-by: Alexander Popov <alex.popov@linux.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0c06dd75
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -3417,6 +3417,9 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	if (unlikely(slab_want_init_on_free(cachep)))
		memset(objp, 0, cachep->object_size);

	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;
@@ -3435,8 +3438,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	if (unlikely(slab_want_init_on_free(cachep)))
		memset(objp, 0, cachep->object_size);
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);
	memcg_slab_free_hook(cachep, &objp, 1);