Commit 8d8556fb authored by Marc Zyngier, committed by yanhaitao
Browse files

KVM: arm64: Introduce vcpu_sve_max_vq() helper

mainline inclusion
from mainline-v5.13-rc1
commit 468f3477
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8E73O
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=468f3477ef8bda1beeb91dd7f423c9bc248ac39d



-------------------------------------------------

The KVM code contains a number of "sve_vq_from_vl(vcpu->arch.sve_max_vl)"
instances, and we are about to add more.

Introduce vcpu_sve_max_vq() as a shorthand for this expression.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Conflicts:
       correct undef function in switch.h
Signed-off-by: Wang ShaoBo <bobo.shaobowang@huawei.com>
parent baaa76d4
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -409,6 +409,8 @@ struct kvm_vcpu_arch {
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
@@ -416,7 +418,7 @@ struct kvm_vcpu_arch {
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
-		__vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl);	\
+		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
+3 −3
Original line number Diff line number Diff line
@@ -313,7 +313,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)

	memset(vqs, 0, sizeof(vqs));

-	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+	max_vq = vcpu_sve_max_vq(vcpu);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);
@@ -441,7 +441,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

-		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
@@ -451,7 +451,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

-		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+		vq = vcpu_sve_max_vq(vcpu);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
+1 −1
Original line number Diff line number Diff line
@@ -271,7 +271,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
	if (sve_guest) {
		__sve_restore_state(vcpu_sve_pffr(vcpu),
				    &vcpu->arch.ctxt.fp_regs.fpsr,
-				    sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+				    vcpu_sve_max_vq(vcpu) - 1);
		write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);