Commit 75c76ab5 authored by Marc Zyngier's avatar Marc Zyngier Committed by Oliver Upton
Browse files

KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration



Just like we repainted the early arm64 code, we need to update
the CPTR_EL2 accesses that are taking place in the nVHE code
when hVHE is used, making them look as if they were CPACR_EL1
accesses. Just like the VHE code.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230609162200.2024064-14-maz@kernel.org


Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parent 6537565f
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -285,7 +285,6 @@
#define CPTR_EL2_TFP	(1 << CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_TZ	(1 << 8)
#define CPTR_NVHE_EL2_RES1	0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */
#define CPTR_EL2_DEFAULT	CPTR_NVHE_EL2_RES1
#define CPTR_NVHE_EL2_RES0	(GENMASK(63, 32) |	\
				 GENMASK(29, 21) |	\
				 GENMASK(19, 14) |	\
@@ -347,8 +346,7 @@
	ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
	ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET)

#define CPACR_EL1_DEFAULT	(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\
				 CPACR_EL1_ZEN_EL1EN)
#define CPACR_EL1_TTA		(1 << 28)

#define kvm_mode_names				\
	{ PSR_MODE_EL0t,	"EL0t" },	\
+31 −0
Original line number Diff line number Diff line
@@ -570,4 +570,35 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
	return test_bit(feature, vcpu->arch.features);
}

/*
 * Compute the reset value for the FP/SVE trap-control register.
 *
 * VHE and hVHE configurations use the CPACR_EL1 encoding (FPSIMD always
 * untrapped; SVE untrapped at EL1 for full VHE only).  Plain nVHE uses the
 * CPTR_EL2 encoding, starting from its RES1 bits and then trapping SVE
 * unless the guest owns the FP state, and untrapping SME where present.
 */
static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 cptr;

	if (has_vhe())
		return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN;

	if (has_hvhe())
		return CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;

	/* Plain nVHE: build a CPTR_EL2 value from the known RES1 bits */
	cptr = CPTR_NVHE_EL2_RES1;

	if (vcpu_has_sve(vcpu) &&
	    vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		cptr |= CPTR_EL2_TZ;

	if (cpus_have_final_cap(ARM64_SME))
		cptr &= ~CPTR_EL2_TSM;

	return cptr;
}

/*
 * Program the reset trap-control value into hardware, using the register
 * view that matches the current configuration: CPACR_EL1 for VHE/hVHE,
 * CPTR_EL2 for plain nVHE.
 */
static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 cptr = kvm_get_reset_cptr_el2(vcpu);

	if (!has_vhe() && !has_hvhe())
		write_sysreg(cptr, cptr_el2);
	else
		write_sysreg(cptr, cpacr_el1);
}
#endif /* __ARM64_KVM_EMULATE_H__ */
+1 −1
Original line number Diff line number Diff line
@@ -1240,7 +1240,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
	}

	vcpu_reset_hcr(vcpu);
	vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT;
	vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);

	/*
	 * Handle the "start in power-off" case.
+2 −2
Original line number Diff line number Diff line
@@ -180,7 +180,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)

	/*
	 * If we have VHE then the Hyp code will reset CPACR_EL1 to
	 * CPACR_EL1_DEFAULT and we need to reenable SME.
	 * the default value and we need to reenable SME.
	 */
	if (has_vhe() && system_supports_sme()) {
		/* Also restore EL0 state seen on entry */
@@ -210,7 +210,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
		 * for EL0.  To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
+1 −1
Original line number Diff line number Diff line
@@ -192,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
	/* Valid trap.  Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	if (has_vhe()) {
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
Loading