Commit 26a40990 authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/sl[au]b: cleanup kmem_cache_alloc[_node]_trace()

Despite its name, kmem_cache_alloc[_node]_trace() is a hook for the inlined
kmalloc. So rename it to kmalloc[_node]_trace().
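
As context, a minimal sketch of the inlined kmalloc() fast path after the
rename, condensed from the include/linux/slab.h hunks below (the
kmalloc_large() bound check and the SLOB case are elided here):

static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		/* Compile-time-constant size: pick a kmalloc cache by index... */
		unsigned int index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		/* ...and call the renamed tracing hook directly. */
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
	}
	/* Non-constant sizes take the out-of-line __kmalloc() path. */
	return __kmalloc(size, flags);
}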

Move its implementation to slab_common.c by using
__kmem_cache_alloc_node(), but keep the CONFIG_TRACING=n variants as static
inlines to save a function call when CONFIG_TRACING=n.
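
With CONFIG_TRACING=n the hook stays a header-only inline that forwards to
kmem_cache_alloc(), so no extra call is emitted; a minimal sketch of that
variant, mirroring the slab.h hunk below (the kasan_kmalloc() line is
inferred from the surrounding context of the hunk):

static __always_inline __alloc_size(3)
void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
{
	/* Inlined into the caller, so the hook costs nothing when tracing is off. */
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}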

Use __assume_kmalloc_alignment for kmalloc[_node]_trace() instead of
__assume_slab_alignment. Generally, kmalloc has larger alignment
requirements.
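
For reference, both attribute macros boil down to __assume_aligned() hints
to the compiler; a sketch of their definitions in include/linux/slab.h
(exact form can vary by kernel version):

/*
 * kmalloc()'d buffers are aligned to ARCH_KMALLOC_MINALIGN, which is
 * generally at least as large as ARCH_SLAB_MINALIGN, so the kmalloc
 * hooks may promise the compiler the stronger alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)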

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent b1405135
include/linux/slab.h (+14 −13)
@@ -449,16 +449,16 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assum
 									 __malloc;
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
-				   __assume_slab_alignment __alloc_size(3);
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+		    __assume_kmalloc_alignment __alloc_size(3);
 
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-					 int node, size_t size) __assume_slab_alignment
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size) __assume_kmalloc_alignment
 						__alloc_size(4);
 
 #else /* CONFIG_TRACING */
-static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
-								    gfp_t flags, size_t size)
+/* Save a function call when CONFIG_TRACING=n */
+static __always_inline __alloc_size(3)
+void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 {
 	void *ret = kmem_cache_alloc(s, flags);
 
@@ -466,7 +466,8 @@ static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_
 	return ret;
 }
 
-static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+static __always_inline __alloc_size(4)
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 			 int node, size_t size)
 {
 	void *ret = kmem_cache_alloc_node(s, gfpflags, node);
@@ -550,7 +551,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_trace(
+		return kmalloc_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, size);
 #endif
@@ -572,7 +573,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(
+		return kmalloc_node_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, node, size);
 	}
mm/slab.c (+0 −35)
@@ -3519,22 +3519,6 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
 
-#ifdef CONFIG_TRACING
-void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
-	void *ret;
-
-	ret = slab_alloc(cachep, NULL, flags, size, _RET_IP_);
-
-	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc(_RET_IP_, ret, cachep,
-		      size, cachep->size, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
  * @cachep: The cache to allocate from.
@@ -3568,25 +3552,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 			       orig_size, caller);
 }
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
-				  gfp_t flags,
-				  int nodeid,
-				  size_t size)
-{
-	void *ret;
-
-	ret = slab_alloc_node(cachep, NULL, flags, nodeid, size, _RET_IP_);
-
-	ret = kasan_kmalloc(cachep, ret, size, flags);
-	trace_kmalloc_node(_RET_IP_, ret, cachep,
-			   size, cachep->size,
-			   flags, nodeid);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
-
 #ifdef CONFIG_PRINTK
 void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
mm/slab_common.c (+27 −0)
@@ -1000,6 +1000,33 @@ size_t __ksize(const void *object)
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 EXPORT_SYMBOL(__ksize);
+
+#ifdef CONFIG_TRACING
+void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
+					    size, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size,
+			   gfpflags, NUMA_NO_NODE);
+
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_trace);
+
+void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+			 int node, size_t size)
+{
+	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
+
+	ret = kasan_kmalloc(s, ret, size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_node_trace);
+#endif /* !CONFIG_TRACING */
 #endif /* !CONFIG_SLOB */
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
mm/slub.c (+0 −27)
@@ -3270,17 +3270,6 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			       caller, orig_size);
 }
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
-	void *ret = slab_alloc(s, NULL, gfpflags, _RET_IP_, size);
-	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags);
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_trace);
-#endif
-
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
@@ -3292,22 +3281,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
-				    gfp_t gfpflags,
-				    int node, size_t size)
-{
-	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
-
-	trace_kmalloc_node(_RET_IP_, ret, s,
-			   size, s->size, gfpflags, node);
-
-	ret = kasan_kmalloc(s, ret, size, gfpflags);
-	return ret;
-}
-EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
-#endif
-
 /*
  * Slow path handling. This may still be called frequently since objects
  * have a longer lifetime than the cpu slabs in most processing loads.