Commit 320af55a authored by Sean Christopherson, committed by Paolo Bonzini
Browse files

KVM: x86: Add wrappers for setting/clearing APICv inhibits



Add set/clear wrappers for toggling APICv inhibits to make the call sites
more readable, and opportunistically rename the inner helpers to align
with the new wrappers and to make them more readable as well.  Invert the
flag from "activate" to "set"; activate is painfully ambiguous as it's
not obvious if the inhibit is being activated, or if APICv is being
activated, in which case the inhibit is being deactivated.

For the functions that take @set, swap the order of the inhibit reason
and @set so that the call sites are visually similar to those that bounce
through the wrapper.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220311043517.17027-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 7491b7b2
Loading
Loading
Loading
Loading
+16 −4
Original line number Diff line number Diff line
@@ -1799,10 +1799,22 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,

bool kvm_apicv_activated(struct kvm *kvm);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
			      enum kvm_apicv_inhibit reason);
void __kvm_request_apicv_update(struct kvm *kvm, bool activate,
				enum kvm_apicv_inhibit reason);
void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				      enum kvm_apicv_inhibit reason, bool set);
void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
				    enum kvm_apicv_inhibit reason, bool set);

/*
 * Set (activate) the given APICv inhibit reason for @kvm, i.e. deactivate
 * APICv for that reason.  Thin wrapper around kvm_set_or_clear_apicv_inhibit()
 * with @set=true, added purely for call-site readability.
 */
static inline void kvm_set_apicv_inhibit(struct kvm *kvm,
					 enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, true);
}

/*
 * Clear (deactivate) the given APICv inhibit reason for @kvm, allowing APICv
 * to be re-activated if no other inhibits remain.  Thin wrapper around
 * kvm_set_or_clear_apicv_inhibit() with @set=false.
 */
static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
					   enum kvm_apicv_inhibit reason)
{
	kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
}

int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);

+7 −3
Original line number Diff line number Diff line
@@ -122,9 +122,13 @@ static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
	else
		hv->synic_auto_eoi_used--;

	__kvm_request_apicv_update(vcpu->kvm,
				   !hv->synic_auto_eoi_used,
				   APICV_INHIBIT_REASON_HYPERV);
	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}
+2 −4
Original line number Diff line number Diff line
@@ -305,15 +305,13 @@ void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
	 * So, deactivate APICv when PIT is in reinject mode.
	 */
	if (reinject) {
		kvm_request_apicv_update(kvm, false,
					 APICV_INHIBIT_REASON_PIT_REINJ);
		kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
		/* The initial state is preserved while ps->reinject == 0. */
		kvm_pit_reset_reinject(pit);
		kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	} else {
		kvm_request_apicv_update(kvm, true,
					 APICV_INHIBIT_REASON_PIT_REINJ);
		kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
		kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
		kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
	}
+5 −6
Original line number Diff line number Diff line
@@ -2918,7 +2918,7 @@ static int interrupt_window_interception(struct kvm_vcpu *vcpu)
	 * In this case AVIC was temporarily disabled for
	 * requesting the IRQ window and we have to re-enable it.
	 */
	kvm_request_apicv_update(vcpu->kvm, true, APICV_INHIBIT_REASON_IRQWIN);
	kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);

	++vcpu->stat.irq_window_exits;
	return 1;
@@ -3516,7 +3516,7 @@ static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
		 * via AVIC. In such case, we need to temporarily disable AVIC,
		 * and fallback to injecting IRQ via V_IRQ.
		 */
		kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_IRQWIN);
		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
		svm_set_vintr(svm);
	}
}
@@ -3948,6 +3948,7 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct kvm_cpuid_entry2 *best;
	struct kvm *kvm = vcpu->kvm;

	vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
				    boot_cpu_has(X86_FEATURE_XSAVE) &&
@@ -3974,16 +3975,14 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
		 * is exposed to the guest, disable AVIC.
		 */
		if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
			kvm_request_apicv_update(vcpu->kvm, false,
						 APICV_INHIBIT_REASON_X2APIC);
			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_X2APIC);

		/*
		 * Currently, AVIC does not work with nested virtualization.
		 * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
		 */
		if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
			kvm_request_apicv_update(vcpu->kvm, false,
						 APICV_INHIBIT_REASON_NESTED);
			kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_NESTED);
	}
	init_vmcb_after_set_cpuid(vcpu);
}
+4 −4
Original line number Diff line number Diff line
@@ -1340,17 +1340,17 @@ TRACE_EVENT(kvm_hv_stimer_cleanup,
);

TRACE_EVENT(kvm_apicv_update_request,
	    TP_PROTO(bool activate, int reason),
	    TP_ARGS(activate, reason),
	    TP_PROTO(int reason, bool activate),
	    TP_ARGS(reason, activate),

	TP_STRUCT__entry(
		__field(bool, activate)
		__field(int, reason)
		__field(bool, activate)
	),

	TP_fast_assign(
		__entry->activate = activate;
		__entry->reason = reason;
		__entry->activate = activate;
	),

	TP_printk("%s reason=%u",
Loading