Commit 2589ab85 authored by Sandipan Das's avatar Sandipan Das Committed by Luo Gengkun
Browse files

perf/x86/amd/core: Fix overflow reset on hotplug

mainline inclusion
from mainline-v6.6-rc4
commit 2eabb655
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IAMTVO

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=2eabb655a968b862bc0c31629a09f0fbf3c80d51



--------------------------------

Kernels older than v5.19 do not support PerfMonV2 and the PMI handler
does not clear the overflow bits of the PerfCntrGlobalStatus register.
Because of this, loading a recent kernel using kexec from an older
kernel can result in inconsistent register states on Zen 4 systems.

The PMI handler of the new kernel gets confused and shows a warning when
an overflow occurs because some of the overflow bits are set even if the
corresponding counters are inactive. These are remnants from overflows
that were handled by the older kernel.

During CPU hotplug, the PerfCntrGlobalCtl and PerfCntrGlobalStatus
registers should always be cleared for PerfMonV2-capable processors.
However, a condition used for NB event constraints applicable only to
older processors currently prevents this from happening. Move the reset
sequence to an appropriate place and also clear the LBR Freeze bit.

Fixes: 21d59e3e ("perf/x86/amd/core: Detect PerfMonV2 support")
Signed-off-by: default avatarSandipan Das <sandipan.das@amd.com>
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/882a87511af40792ba69bb0e9026f19a2e71e8a3.1694696888.git.sandipan.das@amd.com



Conflicts:
	arch/x86/events/amd/core.c
	[Fix contexts conflicts]

Signed-off-by: default avatarLuo Gengkun <luogengkun2@huawei.com>
parent cee0af5e
Loading
Loading
Loading
Loading
+9 −5
Original line number Diff line number Diff line
@@ -591,8 +591,12 @@ static void amd_pmu_cpu_reset(int cpu)
	/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
	/*
	 * Clear freeze and overflow bits i.e. PerfCntrGLobalStatus.LbrFreeze
	 * and PerfCntrGLobalStatus.PerfCntrOvfl
	 */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
	       GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}

static int amd_pmu_cpu_prepare(int cpu)
@@ -619,6 +623,7 @@ static void amd_pmu_cpu_starting(int cpu)
	int i, nb_id;

	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
	amd_pmu_cpu_reset(cpu);

	if (!x86_pmu.amd_nb_constraints)
		return;
@@ -642,13 +647,14 @@ static void amd_pmu_cpu_starting(int cpu)
	cpuc->amd_nb->refcnt++;

	amd_brs_reset();
	amd_pmu_cpu_reset(cpu);
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	amd_pmu_cpu_reset(cpu);

	if (!x86_pmu.amd_nb_constraints)
		return;

@@ -662,8 +668,6 @@ static void amd_pmu_cpu_dead(int cpu)

		cpuhw->amd_nb = NULL;
	}

	amd_pmu_cpu_reset(cpu);
}

static inline void amd_pmu_set_global_ctl(u64 ctl)