Commit dbf4e792 authored by Peter Zijlstra
Browse files

perf/x86/p4: Remove perfctr_second_write quirk



Now that we have a x86_pmu::set_period() method, use it to remove the
perfctr_second_write quirk from the generic code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.839502514@infradead.org
parent 1acab2e0
Loading
Loading
Loading
Loading
+1 −11
Original line number Diff line number Diff line
@@ -1356,7 +1356,7 @@ static void x86_pmu_enable(struct pmu *pmu)
	static_call(x86_pmu_enable_all)(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
@@ -1416,16 +1416,6 @@ int x86_perf_event_set_period(struct perf_event *event)
	if (is_counter_pair(hwc))
		wrmsrl(x86_pmu_event_addr(idx + 1), 0xffff);

	/*
	 * Due to erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
+27 −10
Original line number Diff line number Diff line
@@ -1006,6 +1006,29 @@ static void p4_pmu_enable_all(int added)
	}
}

/*
 * Program the next period into the counter and apply the P4
 * double-write erratum workaround.
 *
 * Returns the value of x86_perf_event_set_period() (non-zero when an
 * overflow-handling retry is needed).
 */
static int p4_pmu_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left;
	int ret;

	ret = x86_perf_event_set_period(event);

	/*
	 * Read pmc_prev_left *after* x86_perf_event_set_period() has
	 * updated it: the second write below must replay exactly the
	 * value just programmed into the counter, not the stale period
	 * left over from the previous activation (which is what a read
	 * before the call would yield).
	 */
	left = this_cpu_read(pmc_prev_left[hwc->idx]);

	if (hwc->event_base) {
		/*
		 * This handles erratum N15 in intel doc 249199-029,
		 * the counter may not be updated correctly on write
		 * so we need a second write operation to do the trick
		 * (the official workaround didn't work)
		 *
		 * the former idea is taken from OProfile code
		 */
		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
	}

	return ret;
}

static int p4_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
@@ -1044,7 +1067,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
		/* event overflow for sure */
		perf_sample_data_init(&data, 0, hwc->last_period);

		if (!x86_perf_event_set_period(event))
		if (!static_call(x86_pmu_set_period)(event))
			continue;


@@ -1316,6 +1339,9 @@ static __initconst const struct x86_pmu p4_pmu = {
	.enable_all		= p4_pmu_enable_all,
	.enable			= p4_pmu_enable_event,
	.disable		= p4_pmu_disable_event,

	.set_period		= p4_pmu_set_period,

	.eventsel		= MSR_P4_BPU_CCCR0,
	.perfctr		= MSR_P4_BPU_PERFCTR0,
	.event_map		= p4_pmu_event_map,
@@ -1334,15 +1360,6 @@ static __initconst const struct x86_pmu p4_pmu = {
	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
	.hw_config		= p4_hw_config,
	.schedule_events	= p4_pmu_schedule_events,
	/*
	 * This handles erratum N15 in intel doc 249199-029,
	 * the counter may not be updated correctly on write
	 * so we need a second write operation to do the trick
	 * (the official workaround didn't work)
	 *
	 * the former idea is taken from OProfile code
	 */
	.perfctr_second_write	= 1,

	.format_attrs		= intel_p4_formats_attr,
};
+1 −1
Original line number Diff line number Diff line
@@ -780,7 +780,6 @@ struct x86_pmu {

	struct event_constraint *event_constraints;
	struct x86_pmu_quirk *quirks;
	int		perfctr_second_write;
	void		(*limit_period)(struct perf_event *event, s64 *l);

	/* PMI handler bits */
@@ -1060,6 +1059,7 @@ static inline bool x86_pmu_has_lbr_callstack(void)
}

DECLARE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
DECLARE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

int x86_perf_event_set_period(struct perf_event *event);