Commit 154b059e authored by Kan Liang, committed by Aichun Shi
Browse files

perf/x86/intel: Support CPUID 10.ECX to disable fixed counters

mainline inclusion
from mainline-v5.12-rc1
commit 32451614
category: feature
feature: SPR PMU core event enhancement
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I596BF



Intel-SIG: commit 32451614 ("perf/x86/intel: Support CPUID 10.ECX to
disable fixed counters")

-------------------------------------

With Architectural Performance Monitoring Version 5, CPUID 10.ECX cpu
leaf indicates the fixed counter enumeration. This extends the previous
count to a bitmap which allows disabling even lower fixed counters.
It could be used by a Hypervisor.

The existing intel_ctrl variable is used to remember the bitmask of the
counters. All code that reads all counters is fixed to check this extra
bitmask.

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Originally-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/1611873611-156687-6-git-send-email-kan.liang@linux.intel.com


Signed-off-by: Yunying Sun <yunying.sun@intel.com>
Signed-off-by: Jun Tian <jun.j.tian@intel.com>
Signed-off-by: Jason Zeng <jason.zeng@intel.com>
Signed-off-by: Aichun Shi <aichun.shi@intel.com>
parent bcebaa3a
Loading
Loading
Loading
Loading
+7 −1
Original line number Diff line number Diff line
@@ -255,6 +255,8 @@ static bool check_hw_exists(void)
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (fixed_counter_disabled(i))
				continue;
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
@@ -1534,6 +1536,8 @@ void perf_event_print_debug(void)
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		if (fixed_counter_disabled(idx))
			continue;
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
@@ -2006,7 +2010,9 @@ static int __init init_hw_perf_events(void)
	pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
	pr_info("... value mask:             %016Lx\n", x86_pmu.cntval_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
	pr_info("... fixed-purpose events:   %lu\n",
			hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
					<< INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);

	if (!x86_pmu.read)
+24 −10
Original line number Diff line number Diff line
@@ -2723,8 +2723,11 @@ static void intel_pmu_reset(void)
		wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
		wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		if (fixed_counter_disabled(idx))
			continue;
		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	if (ds)
		ds->bts_index = ds->bts_buffer_base;
@@ -5217,7 +5220,7 @@ __init int intel_pmu_init(void)
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	unsigned int fixed_mask;
	struct extra_reg *er;
	bool pmem = false;
	int version, i;
@@ -5239,7 +5242,7 @@ __init int intel_pmu_init(void)
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	cpuid(10, &eax.full, &ebx.full, &fixed_mask, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

@@ -5263,12 +5266,15 @@ __init int intel_pmu_init(void)
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events, when not running in a hypervisor:
	 */
	if (version > 1) {
	if (version > 1 && version < 5) {
		int assume = 3 * !boot_cpu_has(X86_FEATURE_HYPERVISOR);

		x86_pmu.num_counters_fixed =
			max((int)edx.split.num_counters_fixed, assume);
	}

		fixed_mask = (1L << x86_pmu.num_counters_fixed) - 1;
	} else if (version >= 5)
		x86_pmu.num_counters_fixed = fls(fixed_mask);

	if (version >= 4)
		x86_pmu.counter_freezing = !disable_counter_freezing;
@@ -5858,8 +5864,7 @@ __init int intel_pmu_init(void)
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;

	/* AnyThread may be deprecated on arch perfmon v5 or later */
	if (x86_pmu.intel_cap.anythread_deprecated)
@@ -5876,12 +5881,21 @@ __init int intel_pmu_init(void)
			 * events to the generic counters.
			 */
			if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
				/*
				 * Disable topdown slots and metrics events,
				 * if slots event is not in CPUID.
				 */
				if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
					c->idxmsk64 = 0;
				c->weight = hweight64(c->idxmsk64);
				continue;
			}

			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
			if (c->cmask == FIXED_EVENT_FLAGS) {
				/* Disabled fixed counters which are not in CPUID */
				c->idxmsk64 &= x86_pmu.intel_ctrl;

				if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
					c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
+5 −0
Original line number Diff line number Diff line
@@ -1072,6 +1072,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);

/*
 * Return true if fixed counter @i is unavailable on this CPU.
 *
 * x86_pmu.intel_ctrl holds the enable bitmask of all counters, with the
 * fixed counters occupying the bits starting at INTEL_PMC_IDX_FIXED
 * (populated from the CPUID 10.ECX fixed-counter mask on arch perfmon v5).
 * The shift discards everything below fixed counter @i, so the result is
 * true only when counter @i and every higher-indexed fixed counter are
 * all disabled.
 *
 * NOTE(review): this is a right-shift test, not a single-bit test — a
 * disabled counter @i is still reported as *enabled* here if any higher
 * fixed counter is enabled.  Confirm that holes in the CPUID-provided
 * mask are intended to be treated this way rather than tested with
 * (intel_ctrl & (1ULL << (i + INTEL_PMC_IDX_FIXED))).
 */
static inline bool fixed_counter_disabled(int i)
{
	return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}

#ifdef CONFIG_CPU_SUP_AMD

int amd_pmu_init(void);