Commit 11e9734b authored by Hyeonggon Yoo, committed by Vlastimil Babka

mm/slab_common: unify NUMA and UMA version of tracepoints



Drop the kmem_alloc event class, rename kmem_alloc_node to kmem_alloc, and
remove the _node postfix from the NUMA versions of the tracepoints.

This will break some tools that depend on {kmem_cache_alloc,kmalloc}_node,
but at this point maintaining both the kmem_alloc and kmem_alloc_node event
classes no longer makes sense: the NUMA and UMA paths now differ only in
the node id they report, so a single event class covers both.
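
As an illustration (a sketch drawn from the mm/slub.c hunks below, not
part of the patch text): both the UMA and NUMA entry points now feed the
same tracepoint, and the UMA path simply reports NUMA_NO_NODE:

	/* UMA path: no node preference, so report NUMA_NO_NODE */
	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
			       s->size, gfpflags, NUMA_NO_NODE);

	/* NUMA path: report the node id the caller requested */
	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
			       s->size, gfpflags, node);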

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
parent 26a40990
include/trace/events/kmem.h +2 −58
@@ -11,62 +11,6 @@
 
 DECLARE_EVENT_CLASS(kmem_alloc,
 
-	TP_PROTO(unsigned long call_site,
-		 const void *ptr,
-		 struct kmem_cache *s,
-		 size_t bytes_req,
-		 size_t bytes_alloc,
-		 gfp_t gfp_flags),
-
-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	call_site	)
-		__field(	const void *,	ptr		)
-		__field(	size_t,		bytes_req	)
-		__field(	size_t,		bytes_alloc	)
-		__field(	unsigned long,	gfp_flags	)
-		__field(	bool,		accounted	)
-	),
-
-	TP_fast_assign(
-		__entry->call_site	= call_site;
-		__entry->ptr		= ptr;
-		__entry->bytes_req	= bytes_req;
-		__entry->bytes_alloc	= bytes_alloc;
-		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
-		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
-					  ((gfp_flags & __GFP_ACCOUNT) ||
-					  (s && s->flags & SLAB_ACCOUNT)) : false;
-	),
-
-	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s accounted=%s",
-		(void *)__entry->call_site,
-		__entry->ptr,
-		__entry->bytes_req,
-		__entry->bytes_alloc,
-		show_gfp_flags(__entry->gfp_flags),
-		__entry->accounted ? "true" : "false")
-);
-
-DEFINE_EVENT(kmem_alloc, kmalloc,
-
-	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
-		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
-
-	TP_PROTO(unsigned long call_site, const void *ptr, struct kmem_cache *s,
-		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
-
-	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags)
-);
-
-DECLARE_EVENT_CLASS(kmem_alloc_node,
-
 	TP_PROTO(unsigned long call_site,
 		 const void *ptr,
 		 struct kmem_cache *s,
@@ -109,7 +53,7 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
 		__entry->accounted ? "true" : "false")
 );
 
-DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
+DEFINE_EVENT(kmem_alloc, kmalloc,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
@@ -118,7 +62,7 @@ DEFINE_EVENT(kmem_alloc_node, kmalloc_node,
 	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node)
 );
 
-DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,
+DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,
 
 	TP_PROTO(unsigned long call_site, const void *ptr,
 		 struct kmem_cache *s, size_t bytes_req, size_t bytes_alloc,
mm/slab.c +4 −5
@@ -3440,8 +3440,8 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 {
 	void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc(_RET_IP_, ret, cachep,
-			       cachep->object_size, cachep->size, flags);
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep, cachep->object_size,
+			       cachep->size, flags, NUMA_NO_NODE);
 
 	return ret;
 }
@@ -3536,9 +3536,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret, cachep,
-				    cachep->object_size, cachep->size,
-				    flags, nodeid);
+	trace_kmem_cache_alloc(_RET_IP_, ret, cachep, cachep->object_size,
+			       cachep->size, flags, nodeid);
 
 	return ret;
 }
mm/slab_common.c +8 −13
@@ -907,9 +907,8 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
 		ret = __kmalloc_large_node(size, flags, node);
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << get_order(size),
-				   flags, node);
+		trace_kmalloc(_RET_IP_, ret, NULL, size,
+			      PAGE_SIZE << get_order(size), flags, node);
 		return ret;
 	}
 
@@ -920,8 +919,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller
 
 	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
 	ret = kasan_kmalloc(s, ret, size, flags);
-	trace_kmalloc_node(caller, ret, s, size,
-			   s->size, flags, node);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, flags, node);
 	return ret;
 }
 
@@ -1007,8 +1005,7 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
 					    size, _RET_IP_);
 
-	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size,
-			   gfpflags, NUMA_NO_NODE);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, NUMA_NO_NODE);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -1020,7 +1017,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 {
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
 
-	trace_kmalloc_node(_RET_IP_, ret, s, size, s->size, gfpflags, node);
+	trace_kmalloc(_RET_IP_, ret, s, size, s->size, gfpflags, node);
 
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
@@ -1076,7 +1073,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
 	trace_kmalloc(_RET_IP_, ret, NULL, size,
-		      PAGE_SIZE << get_order(size), flags);
+		      PAGE_SIZE << get_order(size), flags, NUMA_NO_NODE);
 	return ret;
 }
 EXPORT_SYMBOL(kmalloc_large);
@@ -1085,7 +1082,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	void *ret = __kmalloc_large_node(size, flags, node);
 
-	trace_kmalloc_node(_RET_IP_, ret, NULL, size,
+	trace_kmalloc(_RET_IP_, ret, NULL, size,
 		      PAGE_SIZE << get_order(size), flags, node);
 	return ret;
 }
@@ -1421,8 +1418,6 @@ EXPORT_SYMBOL(ksize);
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
-EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
-EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
 EXPORT_TRACEPOINT_SYMBOL(kfree);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

mm/slob.c +10 −10
@@ -507,8 +507,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 		*m = size;
 		ret = (void *)m + minalign;
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, size + minalign, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      size + minalign, gfp, node);
 	} else {
 		unsigned int order = get_order(size);
 
@@ -516,8 +516,8 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 			gfp |= __GFP_COMP;
 		ret = slob_new_pages(gfp, order, node);
 
-		trace_kmalloc_node(caller, ret, NULL,
-				   size, PAGE_SIZE << order, gfp, node);
+		trace_kmalloc(caller, ret, NULL, size,
+			      PAGE_SIZE << order, gfp, node);
 	}
 
 	kmemleak_alloc(ret, size, 1, gfp);
@@ -608,12 +608,12 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 
 	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node, 0);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
 				       SLOB_UNITS(c->size) * SLOB_UNIT,
 				       flags, node);
 	} else {
 		b = slob_new_pages(flags, get_order(c->size), node);
-		trace_kmem_cache_alloc_node(_RET_IP_, b, NULL, c->object_size,
+		trace_kmem_cache_alloc(_RET_IP_, b, NULL, c->object_size,
 				       PAGE_SIZE << get_order(c->size),
 				       flags, node);
 	}
mm/slub.c +3 −3
@@ -3244,7 +3244,7 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 	void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size);
 
 	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
-				s->size, gfpflags);
+				s->size, gfpflags, NUMA_NO_NODE);
 
 	return ret;
 }
@@ -3274,8 +3274,8 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
-	trace_kmem_cache_alloc_node(_RET_IP_, ret, s,
-				    s->object_size, s->size, gfpflags, node);
+	trace_kmem_cache_alloc(_RET_IP_, ret, s, s->object_size,
+			       s->size, gfpflags, node);
 
 	return ret;
 }
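
For reference, a sketch of how the surviving event class reads after this
patch: it is the old kmem_alloc_node body renamed to kmem_alloc. The middle
of that body is elided between the hunks above, so the fields below are
reconstructed from the removed kmem_alloc class plus the node argument and
may differ from the tree in minor details:

DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 struct kmem_cache *s,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, s, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	unsigned long,	gfp_flags	)
		__field(	int,		node		)
		__field(	bool,		accounted	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= (__force unsigned long)gfp_flags;
		__entry->node		= node;
		__entry->accounted	= IS_ENABLED(CONFIG_MEMCG_KMEM) ?
					  ((gfp_flags & __GFP_ACCOUNT) ||
					  (s && s->flags & SLAB_ACCOUNT)) : false;
	),

	TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s",
		(void *)__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node,
		__entry->accounted ? "true" : "false")
);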