Commit 935a7333 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: SVM: Drop AVIC's intermediate avic_set_running() helper



Drop avic_set_running() in favor of calling avic_vcpu_{load,put}()
directly, and modify the block+put path to use preempt_disable/enable()
instead of get/put_cpu(), as it doesn't actually care about the current
pCPU associated with the vCPU.  Opportunistically add lockdep assertions,
as being preempted in avic_vcpu_put() would lead to consuming stale data,
even though doing so _in the current code base_ would not be fatal.
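
For reference, a minimal sketch of the two primitives involved (generic kernel code, not part of this patch; the demo_* helpers are made-up names):

	#include <linux/lockdep.h>
	#include <linux/preempt.h>
	#include <linux/smp.h>

	static void demo_needs_cpu_id(void)
	{
		int cpu = get_cpu();	/* disables preemption AND returns this pCPU */

		(void)cpu;		/* ... use the pCPU id ... */
		put_cpu();		/* re-enables preemption */
	}

	static void demo_needs_no_cpu_id(void)
	{
		preempt_disable();	/* all the block+put path actually requires */

		/*
		 * Under CONFIG_PROVE_LOCKING this WARNs if preemption is
		 * enabled, which is what the new assertions in
		 * avic_vcpu_{load,put}() catch in wayward callers.
		 */
		lockdep_assert_preemption_disabled();

		preempt_enable();
	}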

Add a much-needed comment explaining why svm_vcpu_blocking() needs to
unload the AVIC and update the IRTE _before_ the vCPU starts blocking.
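
The ordering the new comment relies on is the classic two-flag (Dekker-style) handshake: each side writes its flag, executes a full barrier, then reads the other side's flag, so at least one side is guaranteed to observe the other's write. A rough sketch of the pattern with made-up names (struct demo_vcpu, demo_wake_target; this is illustrative, not KVM code):

	#include <linux/types.h>
	#include <linux/compiler.h>
	#include <asm/barrier.h>

	struct demo_vcpu {
		bool is_running;	/* stands in for the AVIC IsRunning bit */
		bool irr_pending;	/* stands in for a pending vIRR bit */
	};

	static void demo_wake_target(struct demo_vcpu *v)
	{
		/* kick/wake the target vCPU in software */
	}

	/*
	 * Blocking side: clear IsRunning, full barrier, then recheck the
	 * vIRR.  In the real code, set_current_state() supplies the barrier
	 * and kvm_vcpu_check_block() performs the recheck.
	 */
	static bool demo_can_block(struct demo_vcpu *v)
	{
		smp_store_mb(v->is_running, false);	/* store + full barrier */
		return !READ_ONCE(v->irr_pending);
	}

	/*
	 * Sender side: set the vIRR bit, full barrier, then check IsRunning.
	 * This is the barrier the processor must provide on interrupt
	 * delivery; its absence may be the root of erratum #1235.
	 */
	static void demo_send_ipi(struct demo_vcpu *v)
	{
		WRITE_ONCE(v->irr_pending, true);
		smp_mb();
		if (!READ_ONCE(v->is_running))
			demo_wake_target(v);
	}

With both barriers in place, either the sender sees IsRunning=0 and falls back to a software wakeup, or the blocking vCPU sees the pending bit and declines to block; a lost-wakeup window only opens if one of the barriers is missing.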

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211208015236.1616697-22-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 635e6357
+36 −20
@@ -978,6 +978,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	int h_physical_id = kvm_cpu_get_apicid(cpu);
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	lockdep_assert_preemption_disabled();
+
 	/*
 	 * Since the host physical APIC id is 8 bits,
 	 * we can support host APIC ID upto 255.
@@ -1011,6 +1013,8 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	u64 entry;
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	lockdep_assert_preemption_disabled();
+
 	entry = READ_ONCE(*(svm->avic_physical_id_cache));
 
 	/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
@@ -1023,30 +1027,42 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
 	WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
 }
 
-/*
- * This function is called during VCPU halt/unhalt.
- */
-static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
 {
-	int cpu = get_cpu();
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
 
-	WARN_ON(cpu != vcpu->cpu);
+	preempt_disable();
 
-	if (kvm_vcpu_apicv_active(vcpu)) {
-		if (is_run)
-			avic_vcpu_load(vcpu, cpu);
-		else
-			avic_vcpu_put(vcpu);
-	}
-	put_cpu();
-}
+	/*
+	 * Unload the AVIC when the vCPU is about to block, _before_
+	 * the vCPU actually blocks.
+	 *
+	 * Any IRQs that arrive before IsRunning=0 will not cause an
+	 * incomplete IPI vmexit on the source, therefore vIRR will also
+	 * be checked by kvm_vcpu_check_block() before blocking.  The
+	 * memory barrier implicit in set_current_state orders writing
+	 * IsRunning=0 before reading the vIRR.  The processor needs a
+	 * matching memory barrier on interrupt delivery between writing
+	 * IRR and reading IsRunning; the lack of this barrier might be
+	 * the cause of erratum #1235.
+	 */
+	avic_vcpu_put(vcpu);
 
-void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
-	avic_set_running(vcpu, false);
+	preempt_enable();
 }
 
 void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
 {
-	avic_set_running(vcpu, true);
+	int cpu;
+
+	if (!kvm_vcpu_apicv_active(vcpu))
+		return;
+
+	cpu = get_cpu();
+	WARN_ON(cpu != vcpu->cpu);
+
+	avic_vcpu_load(vcpu, cpu);
+
+	put_cpu();
 }
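
For context, these hooks are invoked from the generic halt path. Very roughly, eliding the rcuwait setup, statistics, and error handling (a sketch of this era's kvm_vcpu_block(), not a verbatim copy):

	static void kvm_vcpu_block_sketch(struct kvm_vcpu *vcpu)
	{
		kvm_arch_vcpu_blocking(vcpu);	/* -> svm_vcpu_blocking(): IsRunning=0 */

		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);	/* implicit full barrier */
			if (kvm_vcpu_check_block(vcpu) < 0)	/* catches vIRR bits set before IsRunning=0 */
				break;
			schedule();
		}
		__set_current_state(TASK_RUNNING);

		kvm_arch_vcpu_unblocking(vcpu);	/* -> svm_vcpu_unblocking() */
	}

This is why the AVIC must be unloaded before the first set_current_state(): interrupts that slip in while IsRunning is still '1' are delivered straight to the vIRR with no vmexit on the sender, so only the kvm_vcpu_check_block() recheck can notice them once the vCPU commits to blocking.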