Commit b77d5b1b authored by Muchun Song, committed by Vlastimil Babka

mm: slab: optimize memcg_slab_free_hook()



Most callers of memcg_slab_free_hook() already know the slab, which can
be passed to memcg_slab_free_hook() directly to save the overhead of
another virt_to_slab() call.  For bulk freeing of objects, the call to
slab_objcgs() inside the loop in memcg_slab_free_hook() is redundant as
well.  Rework memcg_slab_free_hook() and build_detached_freelist() to
remove this unnecessary overhead and to let memcg_slab_free_hook()
handle bulk freeing in slab_free().
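
In short, the hook now performs both lookups once per batch instead of
once per object.  A condensed sketch of the reworked hook, paraphrased
from the mm/slab.h hunk below (illustrative, not the verbatim kernel
code):

  static inline void memcg_slab_free_hook(struct kmem_cache *s,
  					struct slab *slab,
  					void **p, int objects)
  {
  	struct obj_cgroup **objcgs;
  	int i;

  	objcgs = slab_objcgs(slab);	/* hoisted: once per batch */
  	if (!objcgs)
  		return;

  	for (i = 0; i < objects; i++) {
  		unsigned int off = obj_to_index(s, slab, p[i]);

  		/* ... uncharge objcgs[off] and clear the slot ... */
  	}
  }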

Move the call site of memcg_slab_free_hook() from do_slab_free() to
slab_free() for SLUB to make the code clearer, since the old logic was
awkward (e.g. the caller had to judge whether it needed to call
memcg_slab_free_hook()).  That made it easy to miss the call entirely,
as the following fixes show:

  commit d1b2cf6c ("mm: memcg/slab: uncharge during kmem_cache_free_bulk()")
  commit ae085d7f ("mm: kfence: fix missing objcg housekeeping for SLAB")
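
With the call site hoisted, slab_free() becomes the single choke point
for memcg uncharging on the SLUB side.  A condensed sketch of the
resulting call structure, assembled from the mm/slub.c hunks below (the
object array now travels alongside the freelist):

  static __always_inline void slab_free(struct kmem_cache *s,
  				      struct slab *slab, void *head,
  				      void *tail, void **p, int cnt,
  				      unsigned long addr)
  {
  	memcg_slab_free_hook(s, slab, p, cnt);	/* the one call site */
  	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
  		do_slab_free(s, slab, head, tail, cnt, addr);
  }

  /* All three free paths funnel through it (per the hunks below): */
  slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);	/* kmem_cache_free() */
  slab_free(df.s, df.slab, df.freelist, df.tail, &p[size],
  	  df.cnt, _RET_IP_);				/* kmem_cache_free_bulk() */
  slab_free(slab->slab_cache, slab, object, NULL, &object,
  	  1, _RET_IP_);					/* kfree() */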

This optimization is mainly for bulk object freeing.  The following
numbers are for freeing 16 objects:

                           before      after
  kmem_cache_free_bulk:   ~430 ns     ~400 ns

The overhead is reduced by about 7% for 16-object freeing.
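
The benchmark itself is not part of the patch; below is a minimal
sketch of how such a number could be taken, as a hypothetical kernel
module (names invented here; SLAB_ACCOUNT is assumed so that the memcg
free path is actually exercised):

  #include <linux/module.h>
  #include <linux/slab.h>
  #include <linux/ktime.h>
  #include <linux/math64.h>

  #define NR_OBJS	16
  #define NR_LOOPS	100000

  static int __init bulk_bench_init(void)
  {
  	struct kmem_cache *c = kmem_cache_create("bulk_bench", 64, 0,
  						 SLAB_ACCOUNT, NULL);
  	void *objs[NR_OBJS];
  	u64 start, total = 0;
  	long i;

  	if (!c)
  		return -ENOMEM;

  	for (i = 0; i < NR_LOOPS; i++) {
  		/* allocate outside the timed region; time only the free */
  		if (kmem_cache_alloc_bulk(c, GFP_KERNEL, NR_OBJS, objs)
  		    != NR_OBJS)
  			break;
  		start = ktime_get_ns();
  		kmem_cache_free_bulk(c, NR_OBJS, objs);
  		total += ktime_get_ns() - start;
  	}
  	if (i)
  		pr_info("kmem_cache_free_bulk(%d objs): ~%llu ns/call\n",
  			NR_OBJS, div64_u64(total, i));
  	kmem_cache_destroy(c);
  	return 0;
  }
  module_init(bulk_bench_init);

  static void __exit bulk_bench_exit(void) { }
  module_exit(bulk_bench_exit);
  MODULE_LICENSE("GPL");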

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Link: https://lore.kernel.org/r/20220429123044.37885-1-songmuchun@bytedance.com


Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent b347aa7b
mm/slab.c  +2 −2
@@ -3406,9 +3406,10 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 {
 	bool init;
 
+	memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1);
+
 	if (is_kfence_address(objp)) {
 		kmemleak_free_recursive(objp, cachep->flags);
-		memcg_slab_free_hook(cachep, &objp, 1);
 		__kfence_free(objp);
 		return;
 	}
@@ -3441,7 +3442,6 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
 	objp = cache_free_debugcheck(cachep, objp, caller);
-	memcg_slab_free_hook(cachep, &objp, 1);
 
 	/*
 	 * Skip calling cache_free_alien() when the platform is not numa.
mm/slab.h  +8 −22
@@ -547,36 +547,22 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 	obj_cgroup_put(objcg);
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
-	struct kmem_cache *s;
 	struct obj_cgroup **objcgs;
-	struct obj_cgroup *objcg;
-	struct slab *slab;
-	unsigned int off;
 	int i;
 
 	if (!memcg_kmem_enabled())
 		return;
 
-	for (i = 0; i < objects; i++) {
-		if (unlikely(!p[i]))
-			continue;
-
-		slab = virt_to_slab(p[i]);
-		/* we could be given a kmalloc_large() object, skip those */
-		if (!slab)
-			continue;
-
-		objcgs = slab_objcgs(slab);
-		if (!objcgs)
-			continue;
+	objcgs = slab_objcgs(slab);
+	if (!objcgs)
+		return;
 
-		if (!s_orig)
-			s = slab->slab_cache;
-		else
-			s = s_orig;
+	for (i = 0; i < objects; i++) {
+		struct obj_cgroup *objcg;
+		unsigned int off;
 
 		off = obj_to_index(s, slab, p[i]);
 		objcg = objcgs[off];
@@ -628,7 +614,7 @@ static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
 {
 }
 
-static inline void memcg_slab_free_hook(struct kmem_cache *s,
+static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
 					void **p, int objects)
 {
 }
mm/slub.c  +22 −44
@@ -3464,9 +3464,6 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	/* memcg_slab_free_hook() is already called for bulk free. */
-	if (!tail)
-		memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3526,9 +3523,10 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 }
 
 static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
-				      void *head, void *tail, int cnt,
+				      void *head, void *tail, void **p, int cnt,
 				      unsigned long addr)
 {
+	memcg_slab_free_hook(s, slab, p, cnt);
 	/*
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
@@ -3550,7 +3548,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 	if (!s)
 		return;
 	trace_kmem_cache_free(_RET_IP_, x, s->name);
-	slab_free(s, virt_to_slab(x), x, NULL, 1, _RET_IP_);
+	slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kmem_cache_free);

@@ -3591,79 +3589,59 @@ static inline
 int build_detached_freelist(struct kmem_cache *s, size_t size,
 			    void **p, struct detached_freelist *df)
 {
-	size_t first_skipped_index = 0;
 	int lookahead = 3;
 	void *object;
 	struct folio *folio;
-	struct slab *slab;
-
-	/* Always re-init detached_freelist */
-	df->slab = NULL;
+	size_t same;
 
-	do {
-		object = p[--size];
-		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
-	} while (!object && size);
-
-	if (!object)
-		return 0;
-
+	object = p[--size];
 	folio = virt_to_folio(object);
 	if (!s) {
 		/* Handle kalloc'ed objects */
 		if (unlikely(!folio_test_slab(folio))) {
 			free_large_kmalloc(folio, object);
-			p[size] = NULL; /* mark object processed */
+			df->slab = NULL;
 			return size;
 		}
 		/* Derive kmem_cache from object */
-		slab = folio_slab(folio);
-		df->s = slab->slab_cache;
+		df->slab = folio_slab(folio);
+		df->s = df->slab->slab_cache;
 	} else {
-		slab = folio_slab(folio);
+		df->slab = folio_slab(folio);
 		df->s = cache_from_obj(s, object); /* Support for memcg */
 	}
 
-	if (is_kfence_address(object)) {
-		slab_free_hook(df->s, object, false);
-		__kfence_free(object);
-		p[size] = NULL; /* mark object processed */
-		return size;
-	}
-
 	/* Start new detached freelist */
-	df->slab = slab;
-	set_freepointer(df->s, object, NULL);
 	df->tail = object;
 	df->freelist = object;
-	p[size] = NULL; /* mark object processed */
 	df->cnt = 1;
 
+	if (is_kfence_address(object))
+		return size;
+
+	set_freepointer(df->s, object, NULL);
+
+	same = size;
 	while (size) {
 		object = p[--size];
-		if (!object)
-			continue; /* Skip processed objects */
-
 		/* df->slab is always set at this point */
 		if (df->slab == virt_to_slab(object)) {
 			/* Opportunity build freelist */
 			set_freepointer(df->s, object, df->freelist);
 			df->freelist = object;
 			df->cnt++;
-			p[size] = NULL; /* mark object processed */
-
+			same--;
+			if (size != same)
+				swap(p[size], p[same]);
 			continue;
 		}
 
 		/* Limit look ahead search */
 		if (!--lookahead)
 			break;
-
-		if (!first_skipped_index)
-			first_skipped_index = size + 1;
 	}
 
-	return first_skipped_index;
+	return same;
 }
 
 /* Note that interrupts must be enabled when calling this function. */
@@ -3672,7 +3650,6 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 	if (WARN_ON(!size))
 		return;
 
-	memcg_slab_free_hook(s, p, size);
 	do {
 		struct detached_freelist df;
 
@@ -3680,7 +3657,8 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 		if (!df.slab)
 			continue;
 
-		slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt,
+			  _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
@@ -4581,7 +4559,7 @@ void kfree(const void *x)
 		return;
 	}
 	slab = folio_slab(folio);
-	slab_free(slab->slab_cache, slab, object, NULL, 1, _RET_IP_);
+	slab_free(slab->slab_cache, slab, object, NULL, &object, 1, _RET_IP_);
 }
 EXPORT_SYMBOL(kfree);
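
A note on the build_detached_freelist() rework above: the old bulk path
uncharged all objects up front in kmem_cache_free_bulk(), with
memcg_slab_free_hook() deriving the slab of every object itself.  The
new code instead swaps each absorbed object to the tail of p[], so that
on return p[size..] is exactly the batch handed to slab_free(), and
&p[size] reaches memcg_slab_free_hook() as a contiguous array of
same-slab objects.  A standalone toy demonstration of that partitioning
trick (plain userspace C; an integer tag stands in for virt_to_slab(),
and the lookahead cutoff is omitted):

  #include <stdio.h>
  #include <stddef.h>

  /* Toy object: the "slab" field stands in for virt_to_slab(). */
  struct obj { int slab; };

  /*
   * Mirrors the new loop: pop the last object, then walk backwards and
   * swap every object on the same slab to the tail of p[].  Returns
   * "same"; p[same..n-1] is the batch, usable as a contiguous array.
   */
  static size_t build_batch(struct obj *p[], size_t size)
  {
  	struct obj *object = p[--size];
  	int slab = object->slab;
  	size_t same = size;

  	while (size) {
  		object = p[--size];
  		if (object->slab == slab) {
  			same--;
  			if (size != same) {	/* swap(p[size], p[same]) */
  				struct obj *tmp = p[size];

  				p[size] = p[same];
  				p[same] = tmp;
  			}
  		}
  	}
  	return same;
  }

  int main(void)
  {
  	struct obj o[6] = { {1}, {2}, {1}, {2}, {1}, {2} };
  	struct obj *p[6] = { &o[0], &o[1], &o[2], &o[3], &o[4], &o[5] };
  	size_t i, same = build_batch(p, 6);

  	/* Objects on the same slab as p[5] now sit at p[same..5]. */
  	for (i = same; i < 6; i++)
  		printf("batch member on slab %d\n", p[i]->slab);
  	return 0;
  }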