Commit b047602d authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull tracing fixes from Steven Rostedt:
 "Fixes and minor clean ups for tracing:

   - Fix memory leak by reverting what was thought to be a double free.

     A static analysis tool gave a false positive that a double free was
     possible in the error path, but it was actually a different
     location that confused the static analyzer (and those of us that
     reviewed it).

   - Move use of static buffers by ftrace_dump() to a location that can
     be used by kgdb's ftdump(), as it needs it for the same reasons.

   - Clarify in the Kconfig description that function tracing has
     negligible impact on x86, but may have a bit bigger impact on other
     architectures.

   - Remove unnecessary extra semicolon in trace event.

   - Make a local variable static that is used in the fprobes sample

   - Use KSYM_NAME_LEN for length of function in kprobe sample and get
     rid of unneeded macro for the same purpose"

* tag 'trace-v5.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  samples: Use KSYM_NAME_LEN for kprobes
  fprobe/samples: Make sample_probe static
  blk-iocost: tracing: atomic64_read(&ioc->vtime_rate) is assigned an extra semicolon
  ftrace: Be more specific about arch impact when function tracer is enabled
  tracing: Fix sleeping while atomic in kdb ftdump
  tracing/histograms: Fix memory leak problem
parents 72a8e05d 1e1fb420
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -160,7 +160,7 @@ TRACE_EVENT(iocost_ioc_vrate_adj,

	TP_fast_assign(
		__assign_str(devname, ioc_name(ioc));
		__entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
		__entry->old_vrate = atomic64_read(&ioc->vtime_rate);
		__entry->new_vrate = new_vrate;
		__entry->busy_level = ioc->busy_level;
		__entry->read_missed_ppm = missed_ppm[READ];
+2 −1
Original line number Diff line number Diff line
@@ -194,7 +194,8 @@ config FUNCTION_TRACER
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
	  small and not measurable even in micro-benchmarks (at least on
	  x86, but may have impact on other architectures).

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
+6 −5
Original line number Diff line number Diff line
@@ -9864,6 +9864,12 @@ void trace_init_global_iter(struct trace_iterator *iter)
	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
@@ -9896,11 +9902,6 @@ void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)

	/* Simulate the iterator */
	trace_init_global_iter(&iter);
	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter.temp = static_temp_buf;
	iter.temp_size = STATIC_TEMP_BUF_SIZE;
	iter.fmt = static_fmt_buf;
	iter.fmt_size = STATIC_FMT_BUF_SIZE;

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
+2 −0
Original line number Diff line number Diff line
@@ -4430,6 +4430,8 @@ static int parse_var_defs(struct hist_trigger_data *hist_data)

			s = kstrdup(field_str, GFP_KERNEL);
			if (!s) {
				kfree(hist_data->attrs->var_defs.name[n_vars]);
				hist_data->attrs->var_defs.name[n_vars] = NULL;
				ret = -ENOMEM;
				goto free;
			}
+1 −1
Original line number Diff line number Diff line
@@ -20,7 +20,7 @@

#define BACKTRACE_DEPTH 16
#define MAX_SYMBOL_LEN 4096
struct fprobe sample_probe;
static struct fprobe sample_probe;
static unsigned long nhit;

static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
Loading