Commit 49de1795 authored by Arnaldo Carvalho de Melo

perf stat: No need to setup affinities when starting a workload



I.e. the simple:

  $ perf stat sleep 1

uses a dummy CPU map, and thus there is no need to set up and clean up
affinities to avoid IPIs, etc.

With this we're down to a single sched_getaffinity() call, in the libnuma
initialization, which can probably be removed in a follow-up patch.
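
For reference, here is a minimal, self-contained sketch (not the perf code
itself) of the save/pin/restore pattern that affinity__setup() and
affinity__cleanup() exist to support; the contiguous CPU numbering and the
printf are illustrative assumptions only:

  /*
   * Standalone illustration of the affinity dance perf's helpers do to
   * avoid cross-CPU IPIs: save the current mask, pin to each CPU while
   * doing that CPU's per-event work, then restore the original mask.
   */
  #define _GNU_SOURCE
  #include <sched.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
  	cpu_set_t saved, pinned;
  	long cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);

  	/* Save the starting mask, roughly what affinity__setup() does. */
  	if (sched_getaffinity(0, sizeof(saved), &saved))
  		return 1;

  	for (cpu = 0; cpu < ncpus; cpu++) {
  		CPU_ZERO(&pinned);
  		CPU_SET(cpu, &pinned);
  		/* Run on the target CPU so its per-CPU work needs no IPIs. */
  		if (sched_setaffinity(0, sizeof(pinned), &pinned))
  			continue;
  		printf("doing per-CPU work on CPU %ld\n", cpu);
  	}

  	/* Restore the saved mask, roughly what affinity__cleanup() does. */
  	return sched_setaffinity(0, sizeof(saved), &saved) ? 1 : 0;
  }

For a plain workload run the evlist has only the dummy CPU map, so there is
nothing to pin to: the patch leaves the affinity pointer NULL, and the per-CPU
iterator and affinity__cleanup() then have nothing to set or restore.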

Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220117160931.1191712-3-acme@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 1855b796
tools/perf/builtin-stat.c: +10 −7
@@ -788,7 +788,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	const bool forks = (argc > 0);
 	bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
 	struct evlist_cpu_iterator evlist_cpu_itr;
-	struct affinity affinity;
+	struct affinity saved_affinity, *affinity = NULL;
 	int err;
 	bool second_pass = false;

@@ -803,8 +803,11 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);

-	if (affinity__setup(&affinity) < 0)
-		return -1;
+	if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+		if (affinity__setup(&saved_affinity) < 0)
+			return -1;
+		affinity = &saved_affinity;
+	}

 	evlist__for_each_entry(evsel_list, counter) {
 		if (bpf_counter__load(counter, &target))
@@ -813,7 +816,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			all_counters_use_bpf = false;
 	}

-	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+	evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 		counter = evlist_cpu_itr.evsel;

 		/*
@@ -869,7 +872,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 		 */

 		/* First close errored or weak retry */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;

 			if (!counter->reset_group && !counter->errored)
@@ -878,7 +881,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
 		}
 		/* Now reopen weak */
-		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, &affinity) {
+		evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
 			counter = evlist_cpu_itr.evsel;

 			if (!counter->reset_group && !counter->errored)
@@ -904,7 +907,7 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 			counter->supported = true;
 		}
 	}
-	affinity__cleanup(&affinity);
+	affinity__cleanup(affinity);

 	evlist__for_each_entry(evsel_list, counter) {
 		if (!counter->supported) {