Commit 1d18538e authored by Steven Rostedt (VMware)'s avatar Steven Rostedt (VMware)
Browse files

tracing: Have dynamic events have a ref counter

As dynamic events are not created by modules, if something is attached to
one, calling "try_module_get()" on its "mod" field is not going to keep
the dynamic event from going away.

Since dynamic events do not need the "mod" pointer of the event structure,
make a union out of it in order to save memory (there's one structure for
each of the thousand+ events in the kernel), and have any event with the
DYNAMIC flag set use a ref counter instead.

Link: https://lore.kernel.org/linux-trace-devel/20210813004448.51c7de69ce432d338f4d226b@kernel.org/
Link: https://lkml.kernel.org/r/20210817035027.174869074@goodmis.org



Suggested-by: default avatarMasami Hiramatsu <mhiramat@kernel.org>
Acked-by: default avatarMasami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: default avatarSteven Rostedt (VMware) <rostedt@goodmis.org>
parent 8b0e6c74
Loading
Loading
Loading
Loading
+44 −1
Original line number Diff line number Diff line
@@ -350,7 +350,14 @@ struct trace_event_call {
	struct trace_event	event;
	char			*print_fmt;
	struct event_filter	*filter;
	void			*mod;
	/*
	 * Static events can disappear with modules,
	 * where as dynamic ones need their own ref count.
	 */
	union {
		void				*module;
		atomic_t			refcnt;
	};
	void			*data;

	/* See the TRACE_EVENT_FL_* flags above */
@@ -366,6 +373,42 @@ struct trace_event_call {
#endif
};

#ifdef CONFIG_DYNAMIC_EVENTS
bool trace_event_dyn_try_get_ref(struct trace_event_call *call);
void trace_event_dyn_put_ref(struct trace_event_call *call);
bool trace_event_dyn_busy(struct trace_event_call *call);
#else
/* Stubs for when dynamic events are not configured in */
static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call)
{
	/* Without DYNAMIC_EVENTS configured, nothing should be calling this */
	return false;
}
static inline void trace_event_dyn_put_ref(struct trace_event_call *call)
{
}
static inline bool trace_event_dyn_busy(struct trace_event_call *call)
{
	/* Nothing should call this without DYNAMIC_EVENTS configured. */
	return true;
}
#endif

/*
 * Pin @call so it cannot go away while in use: dynamic events bump
 * their own ref counter, static events take a reference on the
 * module that created them.
 */
static inline bool trace_event_try_get_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		return trace_event_dyn_try_get_ref(call);

	return try_module_get(call->module);
}

/*
 * Drop a reference taken with trace_event_try_get_ref(): the event's
 * own ref counter for dynamic events, the owning module otherwise.
 */
static inline void trace_event_put_ref(struct trace_event_call *call)
{
	if (call->flags & TRACE_EVENT_FL_DYNAMIC) {
		trace_event_dyn_put_ref(call);
		return;
	}

	module_put(call->module);
}

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
+2 −2
Original line number Diff line number Diff line
@@ -3697,11 +3697,11 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str)
		return false;

	event = container_of(trace_event, struct trace_event_call, event);
	if (!event->mod)
	if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
		return false;

	/* Would rather have rodata, but this will suffice */
	if (within_module_core(addr, event->mod))
	if (within_module_core(addr, event->module))
		return true;

	return false;
+38 −0
Original line number Diff line number Diff line
@@ -13,11 +13,49 @@
#include <linux/tracefs.h>

#include "trace.h"
#include "trace_output.h"	/* for trace_event_sem */
#include "trace_dynevent.h"

static DEFINE_MUTEX(dyn_event_ops_mutex);
static LIST_HEAD(dyn_event_ops_list);

/*
 * trace_event_dyn_try_get_ref - take a reference on a dynamic event
 * @dyn_call: the dynamic event to reference
 *
 * Walks the ftrace_events list under trace_event_sem to confirm that
 * @dyn_call is still registered before bumping its ref count, so the
 * event cannot be freed between the caller finding it and the
 * reference being taken.
 *
 * Returns true if the event was found and the reference taken.
 */
bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call)
{
	struct trace_event_call *call;
	bool ret = false;

	/* Only dynamic events use the refcnt member of the union */
	if (WARN_ON_ONCE(!(dyn_call->flags & TRACE_EVENT_FL_DYNAMIC)))
		return false;

	down_read(&trace_event_sem);
	list_for_each_entry(call, &ftrace_events, list) {
		if (call == dyn_call) {
			atomic_inc(&dyn_call->refcnt);
			ret = true;
			/* An event is on the list at most once */
			break;
		}
	}
	up_read(&trace_event_sem);
	return ret;
}

/*
 * trace_event_dyn_put_ref - release a reference on a dynamic event
 * @call: the dynamic event to release
 *
 * Pairs with trace_event_dyn_try_get_ref().
 */
void trace_event_dyn_put_ref(struct trace_event_call *call)
{
	/* Only dynamic events use the refcnt member of the union */
	if (WARN_ON_ONCE(!(call->flags & TRACE_EVENT_FL_DYNAMIC)))
		return;

	/* A put without a matching get is a bug; reset to a sane state */
	if (WARN_ON_ONCE(atomic_read(&call->refcnt) <= 0)) {
		atomic_set(&call->refcnt, 0);
		return;
	}

	atomic_dec(&call->refcnt);
}

/* Return true if the dynamic event currently has references on it */
bool trace_event_dyn_busy(struct trace_event_call *call)
{
	return !!atomic_read(&call->refcnt);
}

int dyn_event_register(struct dyn_event_operations *ops)
{
	if (!ops || !ops->create || !ops->show || !ops->is_busy ||
+3 −3
Original line number Diff line number Diff line
@@ -177,7 +177,7 @@ static void perf_trace_event_unreg(struct perf_event *p_event)
		}
	}
out:
	module_put(tp_event->mod);
	trace_event_put_ref(tp_event);
}

static int perf_trace_event_open(struct perf_event *p_event)
@@ -224,10 +224,10 @@ int perf_trace_init(struct perf_event *p_event)
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
		    trace_event_try_get_ref(tp_event)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
				trace_event_put_ref(tp_event);
			break;
		}
	}
+14 −8
Original line number Diff line number Diff line
@@ -2525,7 +2525,10 @@ __register_event(struct trace_event_call *call, struct module *mod)
		return ret;

	list_add(&call->list, &ftrace_events);
	call->mod = mod;
	if (call->flags & TRACE_EVENT_FL_DYNAMIC)
		atomic_set(&call->refcnt, 0);
	else
		call->module = mod;

	return 0;
}
@@ -2839,7 +2842,9 @@ static void trace_module_remove_events(struct module *mod)

	down_write(&trace_event_sem);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod)
		if ((call->flags & TRACE_EVENT_FL_DYNAMIC) || !call->module)
			continue;
		if (call->module == mod)
			__trace_remove_event_call(call);
	}
	up_write(&trace_event_sem);
@@ -2982,7 +2987,7 @@ struct trace_event_file *trace_get_event_file(const char *instance,
	}

	/* Don't let event modules unload while in use */
	ret = try_module_get(file->event_call->mod);
	ret = trace_event_try_get_ref(file->event_call);
	if (!ret) {
		trace_array_put(tr);
		ret = -EBUSY;
@@ -3012,7 +3017,7 @@ EXPORT_SYMBOL_GPL(trace_get_event_file);
void trace_put_event_file(struct trace_event_file *file)
{
	mutex_lock(&event_mutex);
	module_put(file->event_call->mod);
	trace_event_put_ref(file->event_call);
	mutex_unlock(&event_mutex);

	trace_array_put(file->tr);
@@ -3147,7 +3152,7 @@ static int free_probe_data(void *data)
	if (!edata->ref) {
		/* Remove the SOFT_MODE flag */
		__ftrace_event_enable_disable(edata->file, 0, 1);
		module_put(edata->file->event_call->mod);
		trace_event_put_ref(edata->file->event_call);
		kfree(edata);
	}
	return 0;
@@ -3280,7 +3285,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,

 out_reg:
	/* Don't let event modules unload while probe registered */
	ret = try_module_get(file->event_call->mod);
	ret = trace_event_try_get_ref(file->event_call);
	if (!ret) {
		ret = -EBUSY;
		goto out_free;
@@ -3310,7 +3315,7 @@ event_enable_func(struct trace_array *tr, struct ftrace_hash *hash,
 out_disable:
	__ftrace_event_enable_disable(file, 0, 1);
 out_put:
	module_put(file->event_call->mod);
	trace_event_put_ref(file->event_call);
 out_free:
	kfree(data);
	goto out;
@@ -3376,7 +3381,8 @@ void __trace_early_add_events(struct trace_array *tr)

	list_for_each_entry(call, &ftrace_events, list) {
		/* Early boot up should not have any modules loaded */
		if (WARN_ON_ONCE(call->mod))
		if (!(call->flags & TRACE_EVENT_FL_DYNAMIC) &&
		    WARN_ON_ONCE(call->module))
			continue;

		ret = __trace_early_add_new_event(call, tr);
Loading