Commit e4c98d68 authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/slab_common: fold kmalloc_order_trace() into kmalloc_large()



There is no caller of kmalloc_order_trace() except kmalloc_large().
Fold it into kmalloc_large() and remove kmalloc_order{,_trace}().

Also add the tracepoint in kmalloc_large() that was previously
in kmalloc_order_trace().

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 0f853b2e
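
At a glance, the patch collapses a three-function chain into one exported function. An illustrative summary (not part of the commit itself):

/* Before:
 *   kmalloc_large(size, flags)                     inline in slab.h
 *     -> kmalloc_order_trace(size, flags, order)   emits trace_kmalloc()
 *        -> kmalloc_order(size, flags, order)      allocates the pages
 *
 * After:
 *   kmalloc_large(size, flags)                     allocates the pages and
 *                                                  emits trace_kmalloc() itself
 */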
include/linux/slab.h: +2 −20
@@ -489,26 +489,8 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
-							      __alloc_size(1);
-
-#ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-				__assume_page_alignment __alloc_size(1);
-#else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
-								 unsigned int order)
-{
-	return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
-{
-	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
-}
+void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+					      __alloc_size(1);
 
 /**
  * kmalloc - allocate memory
  * @size: how many bytes of memory are required.
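
For context, kmalloc_large() is the path the inline kmalloc() takes for requests too big for the kmalloc slab caches. A simplified sketch of the caller (illustrative only; the real kmalloc() in include/linux/slab.h of this era handles more cases):

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	/* Requests above KMALLOC_MAX_CACHE_SIZE bypass the slab caches
	 * and go straight to the page allocator via kmalloc_large(). */
	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
		return kmalloc_large(size, flags);
	return __kmalloc(size, flags);	/* slab-cache path */
}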
mm/slab_common.c: +4 −13
@@ -905,16 +905,16 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+void *kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = NULL;
 	struct page *page;
+	unsigned int order = get_order(size);
 
 	if (unlikely(flags & GFP_SLAB_BUG_MASK))
 		flags = kmalloc_fix_flags(flags);
 
-	flags |= __GFP_COMP;
-	page = alloc_pages(flags, order);
+	page = alloc_pages(flags | __GFP_COMP, order);
 	if (likely(page)) {
 		ret = page_address(page);
 		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
@@ -923,19 +923,10 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	ret = kasan_kmalloc_large(ret, size, flags);
 	/* As ret might get tagged, call kmemleak hook after KASAN. */
 	kmemleak_alloc(ret, size, 1, flags);
-	return ret;
-}
-EXPORT_SYMBOL(kmalloc_order);
-
-#ifdef CONFIG_TRACING
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
-	void *ret = kmalloc_order(size, flags, order);
 	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_order_trace);
-#endif
+EXPORT_SYMBOL(kmalloc_large);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
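
Pieced together from the two hunks above, kmalloc_large() after this commit reads roughly as follows (reassembled for readability; the mod_lruvec_page_state() continuation and the closing brace fall between the hunks and are filled in from the surrounding kernel source):

void *kmalloc_large(size_t size, gfp_t flags)
{
	void *ret = NULL;
	struct page *page;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	/* __GFP_COMP so kfree() can recover the order from the compound page */
	page = alloc_pages(flags | __GFP_COMP, order);
	if (likely(page)) {
		ret = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}
	ret = kasan_kmalloc_large(ret, size, flags);
	/* As ret might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ret, size, 1, flags);
	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large);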