Commit 0bf9601f authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvmarm-fixes-6.3-3' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 6.3, part #3

 - Ensure the guest PMU context is restored before the first KVM_RUN,
   fixing an issue where EL0 event counting is broken after vCPU
   save/restore

 - Actually initialize ID_AA64PFR0_EL1.{CSV2,CSV3} based on the
   sanitized, system-wide values for protected VMs
parents fb5015bc e8162521
+25 −1
@@ -1890,9 +1890,33 @@ static int __init do_pkvm_init(u32 hyp_va_bits)
 	return ret;
 }
 
+static u64 get_hyp_id_aa64pfr0_el1(void)
+{
+	/*
+	 * Track whether the system isn't affected by spectre/meltdown in the
+	 * hypervisor's view of id_aa64pfr0_el1, used for protected VMs.
+	 * Although this is per-CPU, we make it global for simplicity, e.g., not
+	 * to have to worry about vcpu migration.
+	 *
+	 * Unlike for non-protected VMs, userspace cannot override this for
+	 * protected VMs.
+	 */
+	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+	val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
+		 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
+
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
+			  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED);
+	val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
+			  arm64_get_meltdown_state() == SPECTRE_UNAFFECTED);
+
+	return val;
+}
+
 static void kvm_hyp_init_symbols(void)
 {
-	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1();
 	kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
 	kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
 	kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
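
For readers outside the kernel tree, here is a minimal standalone sketch of the bit-field arithmetic the new get_hyp_id_aa64pfr0_el1() performs: clear the CSV2/CSV3 fields of the sanitized register value, then re-encode them from the system's mitigation state. GENMASK64 and FIELD_PREP64 below are local stand-ins for the kernel's GENMASK/FIELD_PREP macros, the sample input values are invented, and the CSV2 (bits [59:56]) and CSV3 (bits [63:60]) field positions follow the Arm ARM.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK/FIELD_PREP. */
#define GENMASK64(h, l)       (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_PREP64(mask, v) (((uint64_t)(v) << __builtin_ctzll(mask)) & (mask))

#define ID_AA64PFR0_EL1_CSV2  GENMASK64(59, 56)
#define ID_AA64PFR0_EL1_CSV3  GENMASK64(63, 60)

int main(void)
{
	/* Pretend this came from read_sanitised_ftr_reg(). */
	uint64_t val = 0x1122334455667788ULL;
	bool spectre_v2_unaffected = true;   /* arm64_get_spectre_v2_state() */
	bool meltdown_unaffected   = false;  /* arm64_get_meltdown_state()   */

	/* Clear both fields, then encode each mitigation state as 0 or 1. */
	val &= ~(ID_AA64PFR0_EL1_CSV2 | ID_AA64PFR0_EL1_CSV3);
	val |= FIELD_PREP64(ID_AA64PFR0_EL1_CSV2, spectre_v2_unaffected);
	val |= FIELD_PREP64(ID_AA64PFR0_EL1_CSV3, meltdown_unaffected);

	printf("hyp view of ID_AA64PFR0_EL1: %#llx\n", (unsigned long long)val);
	return 0;
}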
+4 −1
@@ -33,11 +33,14 @@
  * Allow for protected VMs:
  * - Floating-point and Advanced SIMD
  * - Data Independent Timing
+ * - Spectre/Meltdown Mitigation
  */
 #define PVM_ID_AA64PFR0_ALLOW (\
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP) | \
 	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD) | \
-	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_DIT) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | \
+	ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3) \
 	)
 
 /*
+0 −7
@@ -85,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
 
 static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
 {
-	const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
 	u64 set_mask = 0;
 	u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
 
 	set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
 		PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
 
-	/* Spectre and Meltdown mitigation in KVM */
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2),
-			       (u64)kvm->arch.pfr0_csv2);
-	set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3),
-			       (u64)kvm->arch.pfr0_csv3);
-
 	return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
 }
 
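The net effect of this hunk, together with the PVM_ID_AA64PFR0_ALLOW change above: the per-VM pfr0_csv2/pfr0_csv3 overrides are gone for protected VMs, and CSV2/CSV3 now simply pass through the allow mask from the (mitigation-aware) id_aa64pfr0_el1_sys_val. A hedged sketch of that "(sys_val & allow_mask) | set_mask" filtering, with local mask definitions standing in for ARM64_FEATURE_MASK, an invented sample register value, and the RESTRICT_UNSIGNED set_mask handling omitted:

#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Field positions per the Arm ARM; names mirror the kernel's. */
#define ID_AA64PFR0_EL1_FP       GENMASK64(19, 16)
#define ID_AA64PFR0_EL1_AdvSIMD  GENMASK64(23, 20)
#define ID_AA64PFR0_EL1_DIT      GENMASK64(51, 48)
#define ID_AA64PFR0_EL1_CSV2     GENMASK64(59, 56)
#define ID_AA64PFR0_EL1_CSV3     GENMASK64(63, 60)

/* With CSV2/CSV3 in the allow mask, the system's values pass through. */
#define PVM_ID_AA64PFR0_ALLOW \
	(ID_AA64PFR0_EL1_FP | ID_AA64PFR0_EL1_AdvSIMD | ID_AA64PFR0_EL1_DIT | \
	 ID_AA64PFR0_EL1_CSV2 | ID_AA64PFR0_EL1_CSV3)

int main(void)
{
	/* Invented stand-in for id_aa64pfr0_el1_sys_val, already fixed up
	 * by get_hyp_id_aa64pfr0_el1() on the host side. */
	uint64_t sys_val = 0x1103001000112222ULL;

	/* Fields outside the allow mask read as zero for protected VMs. */
	uint64_t pvm_view = sys_val & PVM_ID_AA64PFR0_ALLOW;

	printf("pVM view of ID_AA64PFR0_EL1: %#llx\n",
	       (unsigned long long)pvm_view);
	return 0;
}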
+1 −0
@@ -558,6 +558,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for_each_set_bit(i, &mask, 32)
 			kvm_pmu_set_pmc_value(kvm_vcpu_idx_to_pmc(vcpu, i), 0, true);
 	}
+	kvm_vcpu_pmu_restore_guest(vcpu);
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc)
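
A rough, stub-only sketch of the call-flow change in the two PMU hunks (every function below is an empty stand-in, not the real implementation, and trap_path/save_restore_path are invented names): kvm_pmu_handle_pmcr() is reached both from the PMCR_EL0 trap handler and when userspace rewrites PMCR_EL0 during vCPU restore, so hoisting kvm_vcpu_pmu_restore_guest() into it (and dropping the call from access_pmcr(), next hunk) ensures the guest's event-counting configuration is reloaded after vCPU save/restore, before the first KVM_RUN.

#include <stdio.h>

/* Stub: in the kernel this reprograms the host so guest EL0/EL1 event
 * counting matches the vCPU's configuration. */
static void kvm_vcpu_pmu_restore_guest(void)
{
	printf("  guest PMU context restored\n");
}

static void kvm_pmu_handle_pmcr(unsigned long val)
{
	printf("PMCR_EL0 <- %#lx\n", val);
	/* The fix: restore here, so every caller gets it. */
	kvm_vcpu_pmu_restore_guest();
}

/* Guest traps a PMCR_EL0 write: previously the only path that restored. */
static void trap_path(void)         { kvm_pmu_handle_pmcr(0x41); }

/* Userspace rewrites PMCR_EL0 while restoring a vCPU, before the first
 * KVM_RUN: previously this path skipped the restore, breaking EL0 event
 * counting after save/restore. */
static void save_restore_path(void) { kvm_pmu_handle_pmcr(0x40); }

int main(void)
{
	save_restore_path();
	trap_path();
	return 0;
}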
+0 −1
@@ -794,7 +794,6 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		if (!kvm_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		kvm_pmu_handle_pmcr(vcpu, val);
-		kvm_vcpu_pmu_restore_guest(vcpu);
 	} else {
 		/* PMCR.P & PMCR.C are RAZ */
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0)