Commit eb55b455 authored by Namhyung Kim's avatar Namhyung Kim Committed by Ingo Molnar
Browse files

perf/core: Add perf_sample_save_brstack() helper



When we save the branch stack to the perf sample data, we need to
update the sample flags and the dynamic size.  To make sure this is
done consistently, add the perf_sample_save_brstack() helper and
convert all call sites.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230118060559.615653-5-namhyung@kernel.org
parent 0a9081cf
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -2313,8 +2313,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
			struct cpu_hw_events *cpuhw;
			cpuhw = this_cpu_ptr(&cpu_hw_events);
			power_pmu_bhrb_read(event, cpuhw);
			data.br_stack = &cpuhw->bhrb_stack;
			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
			perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack);
		}

		if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
+2 −4
Original line number Diff line number Diff line
@@ -928,10 +928,8 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
		if (!x86_perf_event_set_period(event))
			continue;

		if (has_branch_stack(event)) {
			data.br_stack = &cpuc->lbr_stack;
			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
		}
		if (has_branch_stack(event))
			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
+2 −4
Original line number Diff line number Diff line
@@ -3036,10 +3036,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)

		perf_sample_data_init(&data, 0, event->hw.last_period);

		if (has_branch_stack(event)) {
			data.br_stack = &cpuc->lbr_stack;
			data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
		}
		if (has_branch_stack(event))
			perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);

		if (perf_event_overflow(event, &data, regs))
			x86_pmu_stop(event, 0);
+3 −6
Original line number Diff line number Diff line
@@ -1720,10 +1720,8 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
		data->sample_flags |= PERF_SAMPLE_TIME;
	}

	if (has_branch_stack(event)) {
		data->br_stack = &cpuc->lbr_stack;
		data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
	}
	if (has_branch_stack(event))
		perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
}

static void adaptive_pebs_save_regs(struct pt_regs *regs,
@@ -1883,8 +1881,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
			data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
			perf_sample_save_brstack(data, event, &cpuc->lbr_stack);
		}
	}

+40 −26
Original line number Diff line number Diff line
@@ -1102,6 +1102,31 @@ extern u64 perf_event_read_value(struct perf_event *event,

extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);

/* True when the event asked for branch records without the flag bits. */
static inline bool branch_sample_no_flags(const struct perf_event *event)
{
	return !!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS);
}

/* True when the event asked for branch records without cycle counts. */
static inline bool branch_sample_no_cycles(const struct perf_event *event)
{
	return !!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES);
}

/* True when the event asked for the branch type to be saved per record. */
static inline bool branch_sample_type(const struct perf_event *event)
{
	return !!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE);
}

/* True when the event asked for the low-level hardware LBR index. */
static inline bool branch_sample_hw_index(const struct perf_event *event)
{
	return !!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX);
}

/* True when the event asked for the privilege level to be saved per record. */
static inline bool branch_sample_priv(const struct perf_event *event)
{
	return !!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE);
}


struct perf_sample_data {
	/*
@@ -1210,6 +1235,21 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data,
	data->sample_flags |= PERF_SAMPLE_RAW;
}

/*
 * Attach a branch stack to @data and account for it consistently:
 * set data->br_stack, grow data->dyn_size by the on-wire size of the
 * stack (nr word, optional hw_idx word, then the entries), and raise
 * PERF_SAMPLE_BRANCH_STACK in data->sample_flags.
 */
static inline void perf_sample_save_brstack(struct perf_sample_data *data,
					    struct perf_event *event,
					    struct perf_branch_stack *brs)
{
	int sz = brs->nr * sizeof(struct perf_branch_entry);

	sz += sizeof(u64);		/* nr */
	if (branch_sample_hw_index(event))
		sz += sizeof(u64);	/* hw_idx */

	data->br_stack = brs;
	data->dyn_size += sz;
	data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}

/*
 * Clear all bitfields in the perf_branch_entry.
 * The to and from fields are not cleared because they are
@@ -1827,30 +1867,4 @@ static inline void perf_lopwr_cb(bool mode)
}
#endif

/*
 * NOTE(review): per the hunk header above (-30/+4 lines), these are the
 * OLD copies of the branch_sample_*() helpers being removed by this
 * commit; the patch moves them earlier in the header, ahead of
 * struct perf_sample_data, so perf_sample_save_brstack() can use
 * branch_sample_hw_index().  The bodies are unchanged by the move.
 */
#ifdef CONFIG_PERF_EVENTS
/* True when the event asked for branch records without the flag bits. */
static inline bool branch_sample_no_flags(const struct perf_event *event)
{
	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS;
}

/* True when the event asked for branch records without cycle counts. */
static inline bool branch_sample_no_cycles(const struct perf_event *event)
{
	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES;
}

/* True when the event asked for the branch type to be saved per record. */
static inline bool branch_sample_type(const struct perf_event *event)
{
	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE;
}

/* True when the event asked for the low-level hardware LBR index. */
static inline bool branch_sample_hw_index(const struct perf_event *event)
{
	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

/* True when the event asked for the privilege level to be saved per record. */
static inline bool branch_sample_priv(const struct perf_event *event)
{
	return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE;
}
#endif /* CONFIG_PERF_EVENTS */
#endif /* _LINUX_PERF_EVENT_H */
Loading