Commit 3eaf467a authored by Marc Zyngier's avatar Marc Zyngier Committed by Kunkun Jiang
Browse files

KVM: arm64: Offer early resume for non-blocking WFxT instructions

mainline inclusion
from mainline-v5.19-rc1
commit a3fb5965
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6YAMV
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=a3fb59651449d8bd4dc4ed5413888819932c740b



-------------------------------

For WFxT instructions used with very small delays, it is not
unlikely that the deadline has already expired by the time we
reach the WFx handling code.

Check for this condition as soon as possible, and return to the
guest immediately if we can.

Signed-off-by: default avatarMarc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220419182755.601427-7-maz@kernel.org


Signed-off-by: default avatarKunkun Jiang <jiangkunkun@huawei.com>
parent 3c52cea7
Loading
Loading
Loading
Loading
+25 −3
Original line number Diff line number Diff line
@@ -82,12 +82,14 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
@@ -96,18 +98,38 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		vcpu->arch.pvsched.pv_unhalted = false;
		if ((esr & (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT)) == (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT))
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu->arch.flags |= KVM_ARM64_WFIT;
		kvm_vcpu_block(vcpu);
		vcpu->arch.flags &= ~KVM_ARM64_WFIT;
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

out:
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 1;