Commit 46b18782 authored by Marc Zyngier

KVM: arm64: Keep a per-VM pointer to the default PMU



As we are about to allow selection of the PMU exposed to a guest, start by
keeping track of the default one instead of only the PMU version.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
Link: https://lore.kernel.org/r/20220127161759.53553-4-alexandru.elisei@arm.com
parent 2093057a
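
For context, a minimal standalone C sketch (not part of the commit) of the idea: the per-VM state now caches a pointer to the backing arm_pmu rather than a bare version number, and the PMU version is derived from that pointer on demand. The struct and field names mirror the patch below; everything else is illustrative scaffolding.

#include <stdio.h>

/* Stand-in for the kernel's struct arm_pmu; only the field used here. */
struct arm_pmu {
	unsigned int pmuver;	/* PMU architecture revision */
};

/* Per-VM state, mirroring the kvm_host.h hunk below. */
struct kvm_arch_sketch {
	/* before: unsigned int pmuver;  (version only, PMU identity lost) */
	struct arm_pmu *arm_pmu;	/* after: the PMU backing this VM */
};

int main(void)
{
	struct arm_pmu host_pmu = { .pmuver = 0x1 };	/* ID_AA64DFR0_PMUVER_8_0 */
	struct kvm_arch_sketch arch = { .arm_pmu = &host_pmu };

	/* Consumers now read the version through the cached pointer. */
	printf("VM PMU version: %u\n", arch.arm_pmu->pmuver);
	return 0;
}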
arch/arm64/include/asm/kvm_host.h +1 −1
@@ -129,7 +129,7 @@ struct kvm_arch {
 	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
 	 */
 	unsigned long *pmu_filter;
-	unsigned int pmuver;
+	struct arm_pmu *arm_pmu;
 
 	u8 pfr0_csv2;
 	u8 pfr0_csv3;
arch/arm64/kvm/pmu-emul.c +26 −16
@@ -24,7 +24,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
 
 static u32 kvm_pmu_event_mask(struct kvm *kvm)
 {
-	switch (kvm->arch.pmuver) {
+	unsigned int pmuver;
+
+	pmuver = kvm->arch.arm_pmu->pmuver;
+
+	switch (pmuver) {
 	case ID_AA64DFR0_PMUVER_8_0:
 		return GENMASK(9, 0);
 	case ID_AA64DFR0_PMUVER_8_1:
@@ -33,7 +37,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
 	case ID_AA64DFR0_PMUVER_8_7:
 		return GENMASK(15, 0);
 	default:		/* Shouldn't be here, just for sanity */
-		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
+		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
 		return 0;
 	}
 }
@@ -600,6 +604,7 @@ static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
  */
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 {
+	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	struct kvm_pmc *pmc;
 	struct perf_event *event;
@@ -636,7 +641,7 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		return;
 
 	memset(&attr, 0, sizeof(struct perf_event_attr));
-	attr.type = PERF_TYPE_RAW;
+	attr.type = arm_pmu->pmu.type;
 	attr.size = sizeof(attr);
 	attr.pinned = 1;
 	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
@@ -750,12 +755,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
 		static_branch_enable(&kvm_arm_pmu_available);
 }
 
-static int kvm_pmu_probe_pmuver(void)
+static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
 	struct perf_event_attr attr = { };
 	struct perf_event *event;
-	struct arm_pmu *pmu;
-	int pmuver = ID_AA64DFR0_PMUVER_IMP_DEF;
+	struct arm_pmu *pmu = NULL;
 
 	/*
 	 * Create a dummy event that only counts user cycles. As we'll never
@@ -780,19 +784,20 @@ static int kvm_pmu_probe_pmuver(void)
 	if (IS_ERR(event)) {
 		pr_err_once("kvm: pmu event creation failed %ld\n",
 			    PTR_ERR(event));
-		return ID_AA64DFR0_PMUVER_IMP_DEF;
+		return NULL;
 	}
 
 	if (event->pmu) {
 		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver)
-			pmuver = pmu->pmuver;
+		if (pmu->pmuver == 0 ||
+		    pmu->pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
+			pmu = NULL;
 	}
 
 	perf_event_disable(event);
 	perf_event_release_kernel(event);
 
-	return pmuver;
+	return pmu;
 }
 
 u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
@@ -810,7 +815,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
 		 * as RAZ
 		 */
-		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
+		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_PMUVER_8_4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
 		base = 32;
 	}
@@ -932,11 +937,16 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
 	if (vcpu->arch.pmu.created)
 		return -EBUSY;
 
-	if (!vcpu->kvm->arch.pmuver)
-		vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();
-
-	if (vcpu->kvm->arch.pmuver == ID_AA64DFR0_PMUVER_IMP_DEF)
-		return -ENODEV;
+	mutex_lock(&kvm->lock);
+	if (!kvm->arch.arm_pmu) {
+		/* No PMU set, get the default one */
+		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
+		if (!kvm->arch.arm_pmu) {
+			mutex_unlock(&kvm->lock);
+			return -ENODEV;
+		}
+	}
+	mutex_unlock(&kvm->lock);
 
 	switch (attr->attr) {
 	case KVM_ARM_VCPU_PMU_V3_IRQ: {
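
As a usage note on the last hunk: the default PMU is now resolved lazily under kvm->lock, so the first vCPU to configure a PMU attribute probes it exactly once for the whole VM; later callers reuse the cached pointer, and the operation fails with -ENODEV if no usable PMU exists. A minimal standalone sketch of this probe-once-under-lock pattern, using pthreads and hypothetical names rather than kernel API:

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;
static void *vm_arm_pmu;	/* stands in for kvm->arch.arm_pmu */

/*
 * Stands in for kvm_pmu_probe_armpmu(). Always succeeds here; the
 * kernel's probe can return NULL when no usable PMU is found.
 */
static void *probe_default_pmu(void)
{
	static int fake_pmu;
	return &fake_pmu;
}

static int set_pmu_attr(void)
{
	pthread_mutex_lock(&vm_lock);
	if (!vm_arm_pmu) {
		/* First caller resolves the default PMU for the whole VM. */
		vm_arm_pmu = probe_default_pmu();
		if (!vm_arm_pmu) {
			pthread_mutex_unlock(&vm_lock);
			return -1;	/* -ENODEV in the kernel */
		}
	}
	pthread_mutex_unlock(&vm_lock);

	/* ... handle the attribute; vm_arm_pmu is now valid ... */
	return 0;
}

int main(void)
{
	return set_pmu_attr();
}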