Commit c8857935 authored by Marc Zyngier

KVM: arm64: Refactor filtering of ID registers



Our current ID register filtering is starting to be a mess of if()
statements, and isn't going to get any saner.

Let's turn it into a switch(), which has a chance of being more
readable, and introduce a FEATURE() macro that allows easy generation
of feature masks.
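
For example, all of the fields filtered here are 4 bits wide, so the macro
can paste the field name onto its _SHIFT definition and hand the bounds to
GENMASK_ULL() (this is the definition added in the diff below):

    #define FEATURE(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

    /*
     * FEATURE(ID_AA64PFR0_SVE) expands to
     * GENMASK_ULL(ID_AA64PFR0_SVE_SHIFT + 3, ID_AA64PFR0_SVE_SHIFT),
     * i.e. a mask of bits [35:32] of ID_AA64PFR0_EL1.
     */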

No functional change intended.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 99b6a401
arch/arm64/kvm/sys_regs.c  +28 −23
@@ -9,6 +9,7 @@
  *          Christoffer Dall <c.dall@virtualopensystems.com>
  */
 
+#include <linux/bitfield.h>
 #include <linux/bsearch.h>
 #include <linux/kvm_host.h>
 #include <linux/mm.h>
@@ -1016,6 +1017,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 	return true;
 }
 
+#define FEATURE(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))
+
 /* Read a sanitised cpufeature ID register by sys_reg_desc */
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		struct sys_reg_desc const *r, bool raz)
@@ -1024,36 +1027,38 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
-	if (id == SYS_ID_AA64PFR0_EL1) {
+	switch (id) {
+	case SYS_ID_AA64PFR0_EL1:
 		if (!vcpu_has_sve(vcpu))
-			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
-		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
-		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
-		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
-		val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT);
-		val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT);
-	} else if (id == SYS_ID_AA64PFR1_EL1) {
-		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
-	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
-		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
-			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
-			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
-			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
-	} else if (id == SYS_ID_AA64DFR0_EL1) {
-		u64 cap = 0;
-
+			val &= ~FEATURE(ID_AA64PFR0_SVE);
+		val &= ~FEATURE(ID_AA64PFR0_AMU);
+		val &= ~FEATURE(ID_AA64PFR0_CSV2);
+		val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
+		val &= ~FEATURE(ID_AA64PFR0_CSV3);
+		val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+		break;
+	case SYS_ID_AA64PFR1_EL1:
+		val &= ~FEATURE(ID_AA64PFR1_MTE);
+		break;
+	case SYS_ID_AA64ISAR1_EL1:
+		if (!vcpu_has_ptrauth(vcpu))
+			val &= ~(FEATURE(ID_AA64ISAR1_APA) |
+				 FEATURE(ID_AA64ISAR1_API) |
+				 FEATURE(ID_AA64ISAR1_GPA) |
+				 FEATURE(ID_AA64ISAR1_GPI));
+		break;
+	case SYS_ID_AA64DFR0_EL1:
 		/* Limit guests to PMUv3 for ARMv8.1 */
-		if (kvm_vcpu_has_pmu(vcpu))
-			cap = ID_AA64DFR0_PMUVER_8_1;
-
 		val = cpuid_feature_cap_perfmon_field(val,
 						      ID_AA64DFR0_PMUVER_SHIFT,
-						      cap);
-	} else if (id == SYS_ID_DFR0_EL1) {
+						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_1 : 0);
+		break;
+	case SYS_ID_DFR0_EL1:
 		/* Limit guests to PMUv3 for ARMv8.1 */
 		val = cpuid_feature_cap_perfmon_field(val,
 						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_1 : 0);
+		break;
 	}
 
 	return val;
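
To make the clear-then-insert idiom above concrete outside the kernel tree,
here is a minimal, self-contained userspace sketch. The GENMASK_ULL() and
FIELD_PREP() stand-ins are simplified assumptions for illustration only, not
the kernel's definitions from <linux/bits.h> and <linux/bitfield.h>:

    /* Simplified userspace stand-ins for the kernel macros */
    #include <stdint.h>
    #include <stdio.h>

    /* Mask with bits h..l set (assumes 0 <= l <= h <= 63) */
    #define GENMASK_ULL(h, l) \
    	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
    /* Shift a value into the field described by mask */
    #define FIELD_PREP(mask, val) \
    	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))

    #define ID_AA64PFR0_CSV2_SHIFT	56	/* CSV2 is bits [59:56] */
    #define FEATURE(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

    int main(void)
    {
    	uint64_t val = ~0ULL;	/* pretend every feature is advertised */

    	/* Clear the CSV2 field, then insert the value to expose */
    	val &= ~FEATURE(ID_AA64PFR0_CSV2);
    	val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), 1);

    	printf("CSV2 = %llu\n",
    	       (unsigned long long)((val >> ID_AA64PFR0_CSV2_SHIFT) & 0xf));
    	return 0;
    }

This mirrors the val &= ~FEATURE(...); val |= FIELD_PREP(FEATURE(...), ...);
pairs in the SYS_ID_AA64PFR0_EL1 case of read_id_reg().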