Commit fc9465be authored by Sean Christopherson

KVM: x86: Make vmx_get_exit_qual() and vmx_get_intr_info() noinstr-friendly



Add an extra special noinstr-friendly helper to test+mark a "register"
available and use it when caching vmcs.EXIT_QUALIFICATION and
vmcs.VM_EXIT_INTR_INFO.  Make the caching helpers __always_inline too so
that they can be used in noinstr functions.
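
For illustration, a minimal sketch of the distinction (not from this patch;
"example" is a made-up function): the generic __test_and_set_bit() wraps the
bit operation in instrument_read_write(), which KASAN/KCSAN can hook, whereas
the arch__ variant touches the bitmap directly.

static noinstr void example(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	/*
	 * Unsafe here: __test_and_set_bit() expands to
	 * instrument_read_write() + arch___test_and_set_bit(), injecting
	 * instrumentable code into a noinstr function.
	 *
	 * Safe: the arch bitop operates on the bitmap directly.
	 */
	arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}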

A future fix will move VMX's handling of NMI exits into the noinstr
vmx_vcpu_enter_exit() so that the NMI is processed before any kind of
instrumentation can trigger a fault and thus IRET, i.e. so that KVM
doesn't invoke the NMI handler with NMIs enabled.

Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221213060912.654668-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent e8733482
arch/x86/kvm/kvm_cache_regs.h  +12 −0
@@ -75,6 +75,18 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
 	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+/*
+ * kvm_register_test_and_mark_available() is a special snowflake that uses an
+ * arch bitop directly to avoid the explicit instrumentation that comes with
+ * the generic bitops.  This allows code that cannot be instrumented (noinstr
+ * functions), e.g. the low level VM-Enter/VM-Exit paths, to cache registers.
+ */
+static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
+								 enum kvm_reg reg)
+{
+	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
 /*
  * The "raw" register helpers are only for cases where the full 64 bits of a
  * register are read/written irrespective of current vCPU mode.  In other words,
arch/x86/kvm/vmx/vmx.h  +6 −8
@@ -669,25 +669,23 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu);
 int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
 void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
 
-static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
+static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
-		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
+	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1))
 		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	}
+
 	return vmx->exit_qualification;
 }
 
-static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
+static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
-		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
+	if (!kvm_register_test_and_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2))
 		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-	}
+
 	return vmx->exit_intr_info;
 }
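
For context, a rough sketch of the kind of noinstr caller the future fix
enables (hypothetical; the function name is illustrative and assumes a
handler-entry helper such as vmx_do_nmi_irqoff()):

/* Hypothetical sketch, not part of this commit. */
static noinstr void vmx_handle_nmi_sketch(struct kvm_vcpu *vcpu)
{
	/*
	 * vmx_get_intr_info() is __always_inline and uses the uninstrumented
	 * arch bitop, so nothing instrumentable can fault (and IRET, thereby
	 * re-enabling NMIs) before the NMI is seen.
	 */
	u32 intr_info = vmx_get_intr_info(vcpu);

	if (is_nmi(intr_info))
		vmx_do_nmi_irqoff();	/* NMIs are still blocked here */
}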