Commit b2d2af7e authored by Mark Rutland, committed by Paolo Bonzini

kvm/x86: rework guest entry logic



For consistency and clarity, migrate x86 over to the generic helpers for
guest timing and lockdep/RCU/tracing management, and remove the
x86-specific helpers.
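
The generic helpers split that work into two halves:
guest_timing_{enter,exit}_irqoff() handles vtime accounting, while
guest_state_{enter,exit}_irqoff() handles the lockdep/RCU/tracing
transitions around the actual hardware entry. A rough sketch of the
enter side follows (paraphrased from the generic helpers in
<linux/kvm_host.h>; the call to guest_context_enter_irqoff() and the
exact bodies are approximate, see the header for the authoritative
version):

  /* Rough sketch of the generic enter-side helpers, not verbatim. */
  static __always_inline void guest_timing_enter_irqoff(void)
  {
  	/* Account subsequent CPU time to the guest. */
  	instrumentation_begin();
  	vtime_account_guest_enter();
  	instrumentation_end();
  }

  static __always_inline void guest_state_enter_irqoff(void)
  {
  	/* Tell lockdep/tracing that IRQs are effectively on in the guest. */
  	instrumentation_begin();
  	trace_hardirqs_on_prepare();
  	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
  	instrumentation_end();

  	/* Inform context tracking/RCU about the transition to guest mode. */
  	guest_context_enter_irqoff();
  	lockdep_hardirqs_on(CALLER_ADDR0);
  }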

Prior to this patch, the guest timing was entered in
kvm_guest_enter_irqoff() (called by vmx_vcpu_enter_exit() and
svm_vcpu_enter_exit()), and was exited by the call to
vtime_account_guest_exit() within vcpu_enter_guest().

To minimize duplication and to more clearly balance entry and exit, both
entry and exit of guest timing are placed in vcpu_enter_guest(), using
the new guest_timing_{enter,exit}_irqoff() helpers. When context
tracking is used, a small amount of additional time will be accounted
towards guests; tick-based accounting is unaffected as IRQs are
disabled at this point and not enabled until after the return from the
guest.
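
An illustrative sketch of the resulting shape (not the literal code;
the hunks below show the actual changes, and "fastpath_reenter" is a
stand-in name for the fastpath re-entry check):

  static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
  {
  	/* ... IRQs are already disabled at this point ... */
  	guest_timing_enter_irqoff();		/* begin guest time accounting */

  	for (;;) {
  		/*
  		 * Vendor code (vmx_vcpu_enter_exit() / svm_vcpu_enter_exit())
  		 * wraps only the low-level hardware entry/exit:
  		 */
  		guest_state_enter_irqoff();	/* lockdep/RCU/tracing -> guest */
  		/* VMLAUNCH/VMRESUME or VMRUN */
  		guest_state_exit_irqoff();	/* lockdep/RCU/tracing -> host */

  		if (!fastpath_reenter)		/* stand-in for the fastpath check */
  			break;
  	}

  	guest_timing_exit_irqoff();		/* end guest time accounting */
  	/* ... IRQs stay disabled until after the return from the guest ... */
  }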

This also corrects (benign) mis-balanced context tracking accounting
introduced in commits:

  ae95f566 ("KVM: X86: TSCDEADLINE MSR emulation fastpath")
  26efe2fd ("KVM: VMX: Handle preemption timer fastpath")

In those paths, KVM can enter a guest multiple times, calling
vtime_guest_enter() without a corresponding call to
vtime_account_guest_exit(), and with vtime_account_system() called when
vtime_account_guest() should be used.
As account_system_time() checks PF_VCPU and calls account_guest_time(),
this doesn't result in any functional problem, but is unnecessarily
confusing.
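
For reference, the redirect that keeps this benign looks roughly like
the following (paraphrased from account_system_time() in
kernel/sched/cputime.c; details vary by kernel version and
configuration):

  void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
  {
  	/*
  	 * A vCPU task has PF_VCPU set, so "system" time outside of hard
  	 * IRQ context is charged to the guest anyway.
  	 */
  	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  		account_guest_time(p, cputime);
  		return;
  	}

  	/* ... otherwise charge it as irq/softirq/system time ... */
  }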

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Nicolas Saenz Julienne <nsaenzju@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <20220201132926.3301912-4-mark.rutland@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 72e32445
arch/x86/kvm/svm/svm.c  +2 −2
@@ -3630,7 +3630,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long vmcb_pa = svm->current_vmcb->pa;

-	kvm_guest_enter_irqoff();
+	guest_state_enter_irqoff();

	if (sev_es_guest(vcpu->kvm)) {
		__svm_sev_es_vcpu_run(vmcb_pa);
@@ -3650,7 +3650,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
		vmload(__sme_page_pa(sd->save_area));
	}

-	kvm_guest_exit_irqoff();
+	guest_state_exit_irqoff();
}

static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
arch/x86/kvm/vmx/vmx.c  +2 −2
@@ -6767,7 +6767,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
					struct vcpu_vmx *vmx)
{
-	kvm_guest_enter_irqoff();
+	guest_state_enter_irqoff();

	/* L1D Flush includes CPU buffer clear to mitigate MDS */
	if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6783,7 +6783,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,

	vcpu->arch.cr2 = native_read_cr2();

-	kvm_guest_exit_irqoff();
+	guest_state_exit_irqoff();
}

static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
arch/x86/kvm/x86.c  +3 −1
@@ -10088,6 +10088,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
		set_debugreg(0, 7);
	}

+	guest_timing_enter_irqoff();
+
	for (;;) {
		/*
		 * Assert that vCPU vs. VM APICv state is consistent.  An APICv
@@ -10172,7 +10174,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	 * of accounting via context tracking, but the loss of accuracy is
	 * acceptable for all known use cases.
	 */
-	vtime_account_guest_exit();
+	guest_timing_exit_irqoff();

	if (lapic_in_kernel(vcpu)) {
		s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
arch/x86/kvm/x86.h  +0 −45
@@ -10,51 +10,6 @@

void kvm_spurious_fault(void);

static __always_inline void kvm_guest_enter_irqoff(void)
{
	/*
	 * VMENTER enables interrupts (host state), but the kernel state is
	 * interrupts disabled when this is invoked. Also tell RCU about
	 * it. This is the same logic as for exit_to_user_mode().
	 *
	 * This ensures that e.g. latency analysis on the host observes
	 * guest mode as interrupt enabled.
	 *
	 * guest_enter_irqoff() informs context tracking about the
	 * transition to guest mode and if enabled adjusts RCU state
	 * accordingly.
	 */
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	guest_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void kvm_guest_exit_irqoff(void)
{
	/*
	 * VMEXIT disables interrupts (host state), but tracing and lockdep
	 * have them in state 'on' as recorded before entering guest mode.
	 * Same as enter_from_user_mode().
	 *
	 * context_tracking_guest_exit() restores host context and reinstates
	 * RCU if enabled and required.
	 *
	 * This needs to be done immediately after VM-Exit, before any code
	 * that might contain tracepoints or call out to the greater world,
	 * e.g. before x86_spec_ctrl_restore_host().
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	context_tracking_guest_exit();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check)		\
({									\
	bool failed = (consistency_check);				\