Commit f8b61bd2 authored by Song Liu, committed by Arnaldo Carvalho de Melo
Browse files

perf stat: Skip evlist__[enable|disable] when all events use BPF



When all events of a perf-stat session use BPF, it is not necessary to
call evlist__enable() and evlist__disable(). Skip them when
all_counters_use_bpf is true.

Signed-off-by: Song Liu <song@kernel.org>
Reported-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent f42907e8
Loading
Loading
Loading
Loading
+10 −3
Original line number Diff line number Diff line
@@ -572,6 +572,7 @@ static int enable_counters(void)
	 * - we have initial delay configured
	 */
	if (!target__none(&target) || stat_config.initial_delay) {
		if (!all_counters_use_bpf)
			evlist__enable(evsel_list);
		if (stat_config.initial_delay > 0)
			pr_info(EVLIST_ENABLED_MSG);
@@ -581,14 +582,20 @@ static int enable_counters(void)

/*
 * Stop all counters at the end of a perf-stat session so that group
 * ratios are read from a consistent, non-running state.
 */
static void disable_counters(void)
{
	struct evsel *counter;

	/*
	 * If we don't have tracee (attaching to task or cpu), counters may
	 * still be running. To get accurate group ratios, we must stop groups
	 * from counting before reading their constituent counters.
	 */
	/*
	 * NOTE(review): the next line is the diff's pre-image of the
	 * condition below (this span is a rendered diff hunk, not clean
	 * source); in the post-image only the braced form remains.
	 */
	if (!target__none(&target))
	if (!target__none(&target)) {
		/* Disable the BPF-backed counters individually first. */
		evlist__for_each_entry(evsel_list, counter)
			bpf_counter__disable(counter);
		/*
		 * evlist__disable() only matters for non-BPF events; skip the
		 * call entirely when every counter is BPF-based (this commit's
		 * change).
		 */
		if (!all_counters_use_bpf)
			evlist__disable(evsel_list);
	}
}

static volatile int workload_exec_errno;

+0 −3
Original line number Diff line number Diff line
@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_entry(evlist, pos)
		bpf_counter__disable(pos);

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {