Commit 5a785425 authored by Sandipan Das, committed by Xie Haocheng

perf/x86/amd/core: Detect PerfMonV2 support

mainline inclusion
from mainline-v5.19
commit 21d59e3e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5S3WV


CVE: NA

-------------------------------------------------

AMD Performance Monitoring Version 2 (PerfMonV2) introduces
new core PMU features, such as detecting the number of
available PMCs and managing the PMCs through a pair of global
registers, namely PerfCntrGlobalCtl and PerfCntrGlobalStatus.
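
As a rough illustration (not part of this patch), PerfMonV2 and
the PMC count can be probed from user space via CPUID leaf
0x80000022, the same leaf from which the kernel derives
X86_FEATURE_PERFMON_V2; the EAX[0] and EBX[3:0] bit positions
below follow the AMD PPR:

  #include <stdio.h>
  #include <cpuid.h>

  int main(void)
  {
          unsigned int eax, ebx, ecx, edx;

          /* Leaf 0x80000022: Extended Performance Monitoring and Debug */
          if (!__get_cpuid_count(0x80000022, 0, &eax, &ebx, &ecx, &edx))
                  return 1;               /* leaf not available */

          if (eax & 1)                    /* EAX[0]: PerfMonV2 */
                  printf("PerfMonV2, %u core PMCs\n", ebx & 0xf);
          else
                  printf("PerfMonV2 not supported\n");

          return 0;
  }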

Clearing PerfCntrGlobalCtl and PerfCntrGlobalStatus ensures
that all PMCs are inactive and have no pending overflows
when CPUs are onlined or offlined.
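
For reference, the two MSR writes in amd_pmu_cpu_reset() below
target the global registers added to msr-index.h by a companion
patch in this series; the addresses, per the AMD PPR, are:

  #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS	0xc0000300
  #define MSR_AMD64_PERF_CNTR_GLOBAL_CTL	0xc0000301
  #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302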

The PMU version (x86_pmu.version) now indicates PerfMonV2
support and will be used to bypass the new features on
unsupported processors.

Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/dc8672ecbddff394e088ca8abf94b089b8ecc2e7.1650515382.git.sandipan.das@amd.com


Signed-off-by: Xie Haocheng <haocheng.xie@amd.com>
parent f29be156
+27 −0
@@ -19,6 +19,9 @@ static unsigned long perf_nmi_window;
 #define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
 #define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
 
+/* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
+static u64 amd_pmu_global_cntr_mask __read_mostly;
+
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -578,6 +581,18 @@ static struct amd_nb *amd_alloc_nb(int cpu)
 	return nb;
 }
 
+static void amd_pmu_cpu_reset(int cpu)
+{
+	if (x86_pmu.version < 2)
+		return;
+
+	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
+	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
+
+	/* Clear overflow bits i.e. PerfCntrGlobalStatus.PerfCntrOvfl */
+	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
+}
+
 static int amd_pmu_cpu_prepare(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -625,6 +640,7 @@ static void amd_pmu_cpu_starting(int cpu)
 	cpuc->amd_nb->refcnt++;
 
 	amd_brs_reset();
+	amd_pmu_cpu_reset(cpu);
 }
 
 static void amd_pmu_cpu_dead(int cpu)
@@ -644,6 +660,8 @@ static void amd_pmu_cpu_dead(int cpu)
 
 		cpuhw->amd_nb = NULL;
 	}
+
+	amd_pmu_cpu_reset(cpu);
 }
 
 /*
@@ -1185,6 +1203,15 @@ static int __init amd_core_pmu_init(void)
 	x86_pmu.eventsel	= MSR_F15H_PERF_CTL;
 	x86_pmu.perfctr		= MSR_F15H_PERF_CTR;
 	x86_pmu.num_counters	= AMD64_NUM_COUNTERS_CORE;
+
+	/* Check for Performance Monitoring v2 support */
+	if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
+		/* Update PMU version for later usage */
+		x86_pmu.version = 2;
+
+		amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;
+	}
+
 	/*
 	 * AMD Core perfctr has separate MSRs for the NB events, see
 	 * the amd/uncore.c driver.