Commit 5508c9da authored by Song Liu's avatar Song Liu Committed by Arnaldo Carvalho de Melo
Browse files

perf stat: Introduce bpf_counter_ops->disable()



Introduce bpf_counter_ops->disable(), which is used to stop counting the
event.

Committer notes:

Added a dummy bpf_counter__disable() to the python binding to avoid
having 'perf test python' fail.

bpf_counter isn't supported in the python binding.

Signed-off-by: default avatarSong Liu <song@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: kernel-team@fb.com
Link: https://lore.kernel.org/r/20210425214333.1090950-6-song@kernel.org


Signed-off-by: default avatarArnaldo Carvalho de Melo <acme@redhat.com>
parent 01bd8efc
Loading
Loading
Loading
Loading
+26 −0
Original line number Diff line number Diff line
@@ -215,6 +215,17 @@ static int bpf_program_profiler__enable(struct evsel *evsel)
	return 0;
}

/*
 * Stop counting for @evsel: walk every bpf_counter hanging off the
 * evsel and detach its profiler skeleton from the traced program.
 * Always returns 0.
 */
static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *entry;

	list_for_each_entry(entry, &evsel->bpf_counter_list, list) {
		/* load() must have populated the skeleton before disable(). */
		assert(entry->skel != NULL);
		bpf_prog_profiler_bpf__detach(entry->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	// perf_cpu_map uses /sys/devices/system/cpu/online
@@ -280,6 +291,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
struct bpf_counter_ops bpf_program_profiler_ops = {
	.load       = bpf_program_profiler__load,
	.enable	    = bpf_program_profiler__enable,
	.disable    = bpf_program_profiler__disable,
	.read       = bpf_program_profiler__read,
	.destroy    = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
@@ -627,6 +639,12 @@ static int bperf__enable(struct evsel *evsel)
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
@@ -768,6 +786,7 @@ static int bperf__destroy(struct evsel *evsel)
struct bpf_counter_ops bperf_ops = {
	.load       = bperf__load,
	.enable     = bperf__enable,
	.disable    = bperf__disable,
	.read       = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy    = bperf__destroy,
@@ -806,6 +825,13 @@ int bpf_counter__enable(struct evsel *evsel)
	return evsel->bpf_counter_ops->enable(evsel);
}

/*
 * Public entry point: dispatch disable() to the evsel's bpf_counter
 * ops, or do nothing (return 0) when the evsel has no BPF counter.
 */
int bpf_counter__disable(struct evsel *evsel)
{
	if (!bpf_counter_skip(evsel))
		return evsel->bpf_counter_ops->disable(evsel);

	return 0;
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
+7 −0
Original line number Diff line number Diff line
@@ -18,6 +18,7 @@ typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
struct bpf_counter_ops {
	bpf_counter_evsel_target_op load;
	bpf_counter_evsel_op enable;
	bpf_counter_evsel_op disable;
	bpf_counter_evsel_op read;
	bpf_counter_evsel_op destroy;
	bpf_counter_evsel_install_pe_op install_pe;
@@ -32,6 +33,7 @@ struct bpf_counter {

int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
@@ -51,6 +53,11 @@ static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
	return 0;
}

/* Stub for builds without BPF skeleton support: nothing to disable. */
static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
	return -EAGAIN;
+4 −0
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
@@ -421,6 +422,9 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_entry(evlist, pos)
		bpf_counter__disable(pos);

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
+6 −0
Original line number Diff line number Diff line
@@ -90,6 +90,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
 */
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
int bpf_counter__disable(struct evsel *evsel);

void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
@@ -100,6 +101,11 @@ int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_
	return 0;
}

/*
 * Dummy for the python binding: bpf_counter isn't supported there, so
 * disabling is a no-op that keeps 'perf test python' linking cleanly.
 */
int bpf_counter__disable(struct evsel *evsel __maybe_unused)
{
	return 0;
}

/*
 * Support debug printing even though util/debug.c is not linked.  That means
 * implementing 'verbose' and 'eprintf'.