Commit 3c52cea7 authored by Marc Zyngier's avatar Marc Zyngier Committed by Kunkun Jiang
Browse files

KVM: arm64: Handle blocking WFIT instruction

mainline inclusion
from mainline-v5.19-rc1
commit 89f5074c
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6YAMV
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=89f5074c503b6b6f181c0240c931f67bcaf266e9



-------------------------------

When trapping a blocking WFIT instruction, take it into account when
computing the deadline of the background timer.

The state is tracked with a new vcpu flag, and is gated by a new
CPU capability, which isn't currently enabled.

Signed-off-by: default avatarMarc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220419182755.601427-6-maz@kernel.org


Signed-off-by: default avatarKunkun Jiang <jiangkunkun@huawei.com>
parent 2173e074
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -432,6 +432,7 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_WFIT			(1 << 16) /* WFIT instruction trapped */

#define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+20 −2
Original line number Diff line number Diff line
@@ -238,6 +238,20 @@ static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx)
		  (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE);
}

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
	return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
		(vcpu->arch.flags & KVM_ARM64_WFIT));
}

/*
 * Nanoseconds until the WFIT deadline expires, computed against the
 * virtual timer's counter. The deadline is the value of the Rt register
 * encoded in the trapped WFIT instruction.
 */
static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
	u64 deadline = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

	return kvm_counter_compute_delta(vcpu_vtimer(vcpu), deadline);
}

/*
 * Returns the earliest expiration time in ns among guest timers.
 * Note that it will return 0 if none of timers can fire.
@@ -255,6 +269,9 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu)
			min_delta = min(min_delta, kvm_timer_compute_delta(ctx));
	}

	if (vcpu_has_wfit_active(vcpu))
		min_delta = min(min_delta, wfit_delay_ns(vcpu));

	/* If none of timers can fire, then return 0 */
	if (min_delta == ULLONG_MAX)
		return 0;
@@ -354,7 +371,7 @@ static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx)

/*
 * Report whether a timer event is already pending for this vCPU.
 *
 * A pending event here means a blocking WFIT is in progress and its
 * deadline has already been reached (delay of 0 ns). The stray
 * "return 0;" that preceded the real return was an unreachable leftover
 * of the pre-WFIT implementation and has been removed.
 */
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0;
}

/*
@@ -480,7 +497,8 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
	 */
	if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
	    !kvm_timer_irq_can_fire(map.direct_ptimer) &&
	    !kvm_timer_irq_can_fire(map.emul_ptimer))
	    !kvm_timer_irq_can_fire(map.emul_ptimer) &&
	    !vcpu_has_wfit_active(vcpu))
		return;

	/*
+7 −1
Original line number Diff line number Diff line
@@ -87,10 +87,13 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -98,7 +101,10 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		vcpu->arch.pvsched.pv_unhalted = false;
		if ((esr & (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT)) == (ESR_ELx_WFx_ISS_RV | ESR_ELx_WFx_ISS_WFxT))
			vcpu->arch.flags |= KVM_ARM64_WFIT;
		kvm_vcpu_block(vcpu);
		vcpu->arch.flags &= ~KVM_ARM64_WFIT;
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}