Commit 52029198 authored by Marc Zyngier

KVM: arm64: Rework SVE host-save/guest-restore



In order to keep the code readable, move the host-save/guest-restore
sequences into their own functions, with the following changes:
- the hypervisor ZCR is now set from C code
- ZCR_EL2 is always used as the EL2 accessor

This results in some minor assembler macro rework.
No functional change intended.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 71ce1ae5
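
The net effect is easier to read as plain C than as a diff. The sketch below condenses the hyp switch hunks further down (identifiers and bodies are lifted from the hunks, with added comments; surrounding code is omitted):

static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
{
	struct thread_struct *thread;

	/* Recover the host thread_struct from its embedded FPSIMD state. */
	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
			      uw.fpsimd_state);

	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
}

static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	/* The hypervisor vector length (ZCR_EL2) is now set from C code... */
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	/* ...so the asm restore helper no longer takes a vq argument. */
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

With these helpers in place, the save and restore paths in __hyp_handle_fpsimd() reduce to plain if/else calls:

	if (sve_host)
		__hyp_sve_save_host(vcpu);
	else
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
	...
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);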
+6 −2
@@ -232,8 +232,7 @@
 		str		w\nxtmp, [\xpfpsr, #4]
 .endm
 
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
-		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+.macro __sve_load nxbase, xpfpsr, nxtmp
  _for n, 0, 31,	_sve_ldr_v	\n, \nxbase, \n - 34
 		_sve_ldr_p	0, \nxbase
 		_sve_wrffr	0
@@ -244,3 +243,8 @@
 		ldr		w\nxtmp, [\xpfpsr, #4]
 		msr		fpcr, x\nxtmp
 .endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
+		sve_load_vq	\xvqminus1, x\nxtmp, \xtmp2
+		__sve_load	\nxbase, \xpfpsr, \nxtmp
+.endm
+1 −1
@@ -86,7 +86,7 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu);
 void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 void __sve_save_state(void *sve_pffr, u32 *fpsr);
-void __sve_restore_state(void *sve_pffr, u32 *fpsr, unsigned int vqminus1);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 
 #ifndef __KVM_NVHE_HYPERVISOR__
 void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
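
Dropping the vqminus1 parameter also changes the assembly entry point's calling convention: in the next file below, __sve_restore_state now simply invokes __sve_load 0, x1, 2. The register mapping is not spelled out in the patch, but follows from the prototype above and the standard arm64 procedure-call convention:

/*
 * void __sve_restore_state(void *sve_pffr, u32 *fpsr);
 *
 *   x0 = sve_pffr  ->  "0"  (the nxbase register-number operand of __sve_load)
 *   x1 = fpsr      ->  "x1" (the xpfpsr operand)
 *   x2             ->  "2"  (the nxtmp scratch register)
 *
 * The old vqminus1 argument and one of the scratch registers drop out,
 * since the caller now programs ZCR_EL2 from C before the call.
 */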
+1 −1
@@ -21,7 +21,7 @@ SYM_FUNC_START(__fpsimd_restore_state)
 SYM_FUNC_END(__fpsimd_restore_state)
 
 SYM_FUNC_START(__sve_restore_state)
-	sve_load 0, x1, x2, 3, x4
+	__sve_load 0, x1, 2
 	ret
 SYM_FUNC_END(__sve_restore_state)
 
+24 −16
@@ -196,6 +196,24 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
+{
+	struct thread_struct *thread;
+
+	thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
+			      uw.fpsimd_state);
+
+	__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
+}
+
+static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
+{
+	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+	__sve_restore_state(vcpu_sve_pffr(vcpu),
+			    &vcpu->arch.ctxt.fp_regs.fpsr);
+	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+}
+
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
@@ -251,28 +269,18 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		 * In the SVE case, VHE is assumed: it is enforced by
 		 * Kconfig and kvm_arch_init().
 		 */
-		if (sve_host) {
-			struct thread_struct *thread = container_of(
-				vcpu->arch.host_fpsimd_state,
-				struct thread_struct, uw.fpsimd_state);
-
-			__sve_save_state(sve_pffr(thread),
-					 &vcpu->arch.host_fpsimd_state->fpsr);
-		} else {
+		if (sve_host)
+			__hyp_sve_save_host(vcpu);
+		else
 			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-		}
 
 		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
 	}
 
-	if (sve_guest) {
-		__sve_restore_state(vcpu_sve_pffr(vcpu),
-				    &vcpu->arch.ctxt.fp_regs.fpsr,
-				    vcpu_sve_vq(vcpu) - 1);
-		write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
-	} else {
+	if (sve_guest)
+		__hyp_sve_restore_guest(vcpu);
+	else
 		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
-	}
 
 	/* Skip restoring fpexc32 for AArch64 guests */
 	if (!(read_sysreg(hcr_el2) & HCR_RW))