Commit 112cb561 authored by Song Liu's avatar Song Liu Committed by Arnaldo Carvalho de Melo
Browse files

perf stat: Introduce config stat.bpf-counter-events



Currently, to use BPF to aggregate perf event counters, the user passes
the --bpf-counters option. Enable "use BPF by default" for selected
events with a config option, stat.bpf-counter-events. Events whose names
are listed in the option will use BPF.

This also enables mixing BPF events and regular events in the same session.
For example:

   perf config stat.bpf-counter-events=instructions
   perf stat -e instructions,cs

The second command will use BPF for "instructions" but not "cs".

Signed-off-by: default avatarSong Liu <song@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/r/20210425214333.1090950-4-song@kernel.org


Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
parent fe3dd826
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -97,6 +97,8 @@ report::
	Use BPF programs to aggregate readings from perf_events.  This
	allows multiple perf-stat sessions that are counting the same metric (cycles,
	instructions, etc.) to share hardware counters.
	To use BPF programs on common events by default, use
	"perf config stat.bpf-counter-events=<list_of_events>".

--bpf-attr-map::
	With option "--bpf-counters", different perf-stat sessions share
+25 −17
Original line number Diff line number Diff line
@@ -161,6 +161,7 @@ static const char *smi_cost_attrs = {
};

static struct evlist	*evsel_list;
static bool all_counters_use_bpf = true;

static struct target target = {
	.uid	= UINT_MAX,
@@ -401,6 +402,9 @@ static int read_affinity_counters(struct timespec *rs)
	struct affinity affinity;
	int i, ncpus, cpu;

	if (all_counters_use_bpf)
		return 0;

	if (affinity__setup(&affinity) < 0)
		return -1;

@@ -415,6 +419,8 @@ static int read_affinity_counters(struct timespec *rs)
		evlist__for_each_entry(evsel_list, counter) {
			if (evsel__cpu_iter_skip(counter, cpu))
				continue;
			if (evsel__is_bpf(counter))
				continue;
			if (!counter->err) {
				counter->err = read_counter_cpu(counter, rs,
								counter->cpu_iter - 1);
@@ -431,6 +437,9 @@ static int read_bpf_map_counters(void)
	int err;

	evlist__for_each_entry(evsel_list, counter) {
		if (!evsel__is_bpf(counter))
			continue;

		err = bpf_counter__read(counter);
		if (err)
			return err;
@@ -441,14 +450,10 @@ static int read_bpf_map_counters(void)
static void read_counters(struct timespec *rs)
{
	struct evsel *counter;
	int err;

	if (!stat_config.stop_read_counter) {
		if (target__has_bpf(&target))
			err = read_bpf_map_counters();
		else
			err = read_affinity_counters(rs);
		if (err < 0)
		if (read_bpf_map_counters() ||
		    read_affinity_counters(rs))
			return;
	}

@@ -537,13 +542,14 @@ static int enable_counters(void)
	struct evsel *evsel;
	int err;

	if (target__has_bpf(&target)) {
	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel__is_bpf(evsel))
			continue;

		err = bpf_counter__enable(evsel);
		if (err)
			return err;
	}
	}

	if (stat_config.initial_delay < 0) {
		pr_info(EVLIST_DISABLED_MSG);
@@ -786,11 +792,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
	if (affinity__setup(&affinity) < 0)
		return -1;

	if (target__has_bpf(&target)) {
	evlist__for_each_entry(evsel_list, counter) {
		if (bpf_counter__load(counter, &target))
			return -1;
		}
		if (!evsel__is_bpf(counter))
			all_counters_use_bpf = false;
	}

	evlist__for_each_cpu (evsel_list, i, cpu) {
@@ -807,6 +813,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
				continue;
			if (counter->reset_group || counter->errored)
				continue;
			if (evsel__is_bpf(counter))
				continue;
try_again:
			if (create_perf_stat_counter(counter, &stat_config, &target,
						     counter->cpu_iter - 1) < 0) {
+2 −1
Original line number Diff line number Diff line
@@ -790,7 +790,8 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (target->use_bpf)
	else if (target->use_bpf ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
+4 −0
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@
#include "util/hist.h"  /* perf_hist_config */
#include "util/llvm-utils.h"   /* perf_llvm_config */
#include "util/stat.h"  /* perf_stat__set_big_num */
#include "util/evsel.h"  /* evsel__hw_names, evsel__use_bpf_counters */
#include "build-id.h"
#include "debug.h"
#include "config.h"
@@ -460,6 +461,9 @@ static int perf_stat_config(const char *var, const char *value)
	if (!strcmp(var, "stat.no-csv-summary"))
		perf_stat__set_no_csv_summary(perf_config_bool(var, value));

	if (!strcmp(var, "stat.bpf-counter-events"))
		evsel__bpf_counter_events = strdup(value);

	/* Add other config variables here. */
	return 0;
}
+22 −0
Original line number Diff line number Diff line
@@ -492,6 +492,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"ref-cycles",
};

char *evsel__bpf_counter_events;

/*
 * Return true if @name appears as a full, comma-separated token in the
 * stat.bpf-counter-events config string (evsel__bpf_counter_events).
 *
 * Every occurrence of @name is examined, not just the first substring
 * hit, so a list such as "xcs,cs" still matches the event "cs" even
 * though the first strstr() hit (inside "xcs") is not a full token.
 */
bool evsel__match_bpf_counter_events(const char *name)
{
	size_t name_len;
	const char *ptr;

	if (!evsel__bpf_counter_events || !name)
		return false;

	name_len = strlen(name);

	/* walk all substring hits until one aligns with token boundaries */
	for (ptr = strstr(evsel__bpf_counter_events, name); ptr;
	     ptr = strstr(ptr + 1, name)) {
		bool starts_token = (ptr == evsel__bpf_counter_events) ||
				    (*(ptr - 1) == ',');
		bool ends_token = (ptr[name_len] == ',') ||
				  (ptr[name_len] == '\0');

		if (starts_token && ends_token)
			return true;
	}

	return false;
}

static const char *__evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
Loading