Commit 4e71cad3 authored by Paolo Bonzini

Merge remote-tracking branch 'kvm/master' into HEAD

Merge bugfix patches from Linux 5.17-rc.
parents 48ebd0cf 710c4765

arch/arm64/kvm/vgic/vgic-mmio.c  +2 −0
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 						    IRQCHIP_STATE_PENDING,
 						    &val);
 			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+		} else if (vgic_irq_is_mapped_level(irq)) {
+			val = vgic_get_phys_line_level(irq);
 		} else {
 			val = irq_is_pending(irq);
 		}
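
Why the vgic change above matters: for a hardware-mapped, level-triggered interrupt, KVM's cached pending flag can lag the physical input line, so the emulated pending-register read now reports the line level taken from the hardware. Below is a toy userspace model of the resulting decision tree; the struct and helpers are illustrative stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct vgic_irq: just the state that the emulated
 * pending-register read consults. */
struct toy_irq {
	bool hw_sgi;        /* hardware-backed SGI */
	bool mapped_level;  /* hardware-mapped, level-triggered */
	bool phys_pending;  /* pending state held by the host irqchip */
	bool phys_line;     /* current level of the physical input line */
	bool soft_pending;  /* KVM's cached software pending flag */
};

/* Mirrors the branch structure of vgic_mmio_read_pending() above. */
static bool toy_read_pending(const struct toy_irq *irq)
{
	if (irq->hw_sgi)
		return irq->phys_pending; /* irq_get_irqchip_state() */
	else if (irq->mapped_level)
		return irq->phys_line;    /* vgic_get_phys_line_level() */
	else
		return irq->soft_pending; /* irq_is_pending() */
}

int main(void)
{
	/* Line asserted in hardware, cached flag stale. */
	struct toy_irq irq = { .mapped_level = true, .phys_line = true };

	/* Without the new branch, the stale cached flag (0) was reported. */
	printf("pending = %d\n", toy_read_pending(&irq));
	return 0;
}
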
arch/x86/kvm/pmu.c  +4 −3
@@ -95,7 +95,7 @@ static void kvm_perf_overflow(struct perf_event *perf_event,
 }
 
 static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
-				  unsigned config, bool exclude_user,
+				  u64 config, bool exclude_user,
 				  bool exclude_kernel, bool intr,
 				  bool in_tx, bool in_tx_cp)
 {
@@ -181,7 +181,8 @@ static int cmp_u64(const void *a, const void *b)
 
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 {
-	unsigned config, type = PERF_TYPE_RAW;
+	u64 config;
+	u32 type = PERF_TYPE_RAW;
 	struct kvm *kvm = pmc->vcpu->kvm;
 	struct kvm_pmu_event_filter *filter;
 	bool allow_event = true;
@@ -220,7 +221,7 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
 	}
 
 	if (type == PERF_TYPE_RAW)
-		config = eventsel & X86_RAW_EVENT_MASK;
+		config = eventsel & AMD64_RAW_EVENT_MASK;
 
 	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
 		return;
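
Why the pmu.c changes matter: AMD encodes event-select bits [11:8] in bits [35:32] of the event selector, above the low 32 bits, so a 32-bit `unsigned config` silently truncated them; X86_RAW_EVENT_MASK likewise covers only the legacy bits, while AMD64_RAW_EVENT_MASK retains the extended event-select field. A self-contained sketch of the truncation (toy values, not from this commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* AMD event code 0x1d1: EventSelect[11:8] lives in MSR bits [35:32],
	 * so bit 32 of the selector carries bit 8 of the event code. */
	uint64_t eventsel = (1ULL << 32) | 0xd1;

	unsigned int old_config = (unsigned int)eventsel; /* 32-bit: bits [35:32] silently dropped */
	uint64_t     new_config = eventsel;               /* u64: full event code preserved */

	printf("old config = %#x   (event truncated to 0xd1)\n", old_config);
	printf("new config = %#llx (event 0x1d1 intact)\n",
	       (unsigned long long)new_config);
	return 0;
}
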
arch/x86/kvm/svm/avic.c  +23 −48
@@ -269,6 +269,22 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * Note, the vCPU could get migrated to a different pCPU at any point,
+	 * which could result in signalling the wrong/previous pCPU.  But if
+	 * that happens the vCPU is guaranteed to do a VMRUN (after being
+	 * migrated) and thus will process pending interrupts, i.e. a doorbell
+	 * is not needed (and the spurious one is harmless).
+	 */
+	int cpu = READ_ONCE(vcpu->cpu);
+
+	if (cpu != get_cpu())
+		wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+	put_cpu();
+}
+
 static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
 				   u32 icrl, u32 icrh)
 {
@@ -284,8 +300,13 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
 					GET_APIC_DEST_FIELD(icrh),
-					icrl & APIC_DEST_MASK))
-			kvm_vcpu_wake_up(vcpu);
+					icrl & APIC_DEST_MASK)) {
+			vcpu->arch.apic->irr_pending = true;
+			svm_complete_interrupt_delivery(vcpu,
+							icrl & APIC_MODE_MASK,
+							icrl & APIC_INT_LEVELTRIG,
+							icrl & APIC_VECTOR_MASK);
+		}
 	}
 }
 
@@ -647,52 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 	return;
 }
 
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
-	if (!vcpu->arch.apicv_active)
-		return -1;
-
-	kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
-	/*
-	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
-	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
-	 * the read of guest_mode, which guarantees that either VMRUN will see
-	 * and process the new vIRR entry, or that the below code will signal
-	 * the doorbell if the vCPU is already running in the guest.
-	 */
-	smp_mb__after_atomic();
-
-	/*
-	 * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
-	 * is in the guest.  If the vCPU is not in the guest, hardware will
-	 * automatically process AVIC interrupts at VMRUN.
-	 */
-	if (vcpu->mode == IN_GUEST_MODE) {
-		int cpu = READ_ONCE(vcpu->cpu);
-
-		/*
-		 * Note, the vCPU could get migrated to a different pCPU at any
-		 * point, which could result in signalling the wrong/previous
-		 * pCPU.  But if that happens the vCPU is guaranteed to do a
-		 * VMRUN (after being migrated) and thus will process pending
-		 * interrupts, i.e. a doorbell is not needed (and the spurious
-		 * one is harmless).
-		 */
-		if (cpu != get_cpu())
-			wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
-		put_cpu();
-	} else {
-		/*
-		 * Wake the vCPU if it was blocking.  KVM will then detect the
-		 * pending IRQ when checking if the vCPU has a wake event.
-		 */
-		kvm_vcpu_wake_up(vcpu);
-	}
-
-	return 0;
-}
-
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
 {
 	return false;
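
Notes on the avic.c changes: the doorbell write moves into the shared helper avic_ring_doorbell(), and avic_kick_target_vcpus() now completes delivery of the ICR-specified vector via svm_complete_interrupt_delivery() instead of merely waking matching vCPUs. In the helper, get_cpu()/put_cpu() disable preemption so the current-pCPU comparison stays valid across the check; as the comment says, a stale vcpu->cpu only yields a harmless spurious doorbell. A toy model of the doorbell decision (plain C, illustrative names):

#include <stdio.h>

/* Toy stand-in for the doorbell MSR write (MSR_AMD64_SVM_AVIC_DOORBELL). */
static void toy_wrmsr_doorbell(int apic_id)
{
	printf("doorbell -> APIC ID %d\n", apic_id);
}

/* Mirrors avic_ring_doorbell(): signal only when the target vCPU runs on
 * a different physical CPU; on the same CPU the vIRR entry is picked up
 * at the next VMRUN anyway, so no self-signal is needed. */
static void toy_ring_doorbell(int target_pcpu, int this_pcpu)
{
	if (target_pcpu != this_pcpu)
		toy_wrmsr_doorbell(target_pcpu);
}

int main(void)
{
	toy_ring_doorbell(2, 0); /* cross-CPU: ring the doorbell */
	toy_ring_doorbell(0, 0); /* local: skip, VMRUN will process the vIRR */
	return 0;
}
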
arch/x86/kvm/svm/svm.c  +41 −7
@@ -3311,19 +3311,53 @@ static void svm_inject_irq(struct kvm_vcpu *vcpu)
 		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
 }
 
-static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
-				  int trig_mode, int vector)
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+				     int trig_mode, int vector)
 {
-	struct kvm_vcpu *vcpu = apic->vcpu;
+	/*
+	 * vcpu->arch.apicv_active must be read after vcpu->mode.
+	 * Pairs with smp_store_release in vcpu_enter_guest.
+	 */
+	bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
 
-	if (svm_deliver_avic_intr(vcpu, vector)) {
-		kvm_lapic_set_irr(vector, apic);
+	if (!READ_ONCE(vcpu->arch.apicv_active)) {
+		/* Process the interrupt via inject_pending_event */
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
+		return;
+	}
+
+	trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+	if (in_guest_mode) {
+		/*
+		 * Signal the doorbell to tell hardware to inject the IRQ.  If
+		 * the vCPU exits the guest before the doorbell chimes, hardware
+		 * will automatically process AVIC interrupts at the next VMRUN.
+		 */
+		avic_ring_doorbell(vcpu);
 	} else {
-		trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
-					   trig_mode, vector);
+		/*
+		 * Wake the vCPU if it was blocking.  KVM will then detect the
+		 * pending IRQ when checking if the vCPU has a wake event.
+		 */
+		kvm_vcpu_wake_up(vcpu);
 	}
 }
 
+static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
+				  int trig_mode, int vector)
+{
+	kvm_lapic_set_irr(vector, apic);
+
+	/*
+	 * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+	 * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+	 * the read of guest_mode.  This guarantees that either VMRUN will see
+	 * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+	 * will signal the doorbell if the CPU has already entered the guest.
+	 */
+	smp_mb__after_atomic();
+	svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
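
Notes on the svm.c changes: delivery is split so the vIRR write always happens before the guest-mode check, with smp_mb__after_atomic() pairing against the barrier after vcpu->mode is set in vcpu_enter_guest(). This is the store-buffering pattern: with a full barrier on each side between its store and its load of the other side's store, at least one side must observe the other, so an interrupt can neither be lost nor stall. A self-contained C11 model of that contract (toy code, not the kernel's primitives; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { TOY_OUTSIDE_GUEST, TOY_IN_GUEST };

static _Atomic int toy_irr;   /* stands in for the vIRR bit */
static _Atomic int toy_mode;  /* stands in for vcpu->mode */
static bool doorbell, seen;

/* Sender side: mirrors svm_deliver_interrupt() above. */
static void *sender(void *arg)
{
	atomic_store_explicit(&toy_irr, 1, memory_order_relaxed); /* kvm_lapic_set_irr() */
	atomic_thread_fence(memory_order_seq_cst);                /* smp_mb__after_atomic() */
	/* smp_load_acquire(&vcpu->mode) in svm_complete_interrupt_delivery() */
	doorbell = atomic_load_explicit(&toy_mode, memory_order_acquire) == TOY_IN_GUEST;
	return arg;
}

/* vCPU side: mirrors guest entry in vcpu_enter_guest(). */
static void *vcpu(void *arg)
{
	/* smp_store_release(&vcpu->mode, IN_GUEST_MODE) */
	atomic_store_explicit(&toy_mode, TOY_IN_GUEST, memory_order_release);
	atomic_thread_fence(memory_order_seq_cst); /* the pairing smp_mb_*() */
	seen = atomic_load_explicit(&toy_irr, memory_order_relaxed) != 0;
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sender, NULL);
	pthread_create(&b, NULL, vcpu, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* The fences forbid doorbell=0 seen=0: either VMRUN sees the new
	 * vIRR entry, or the sender sees IN_GUEST and rings the doorbell. */
	printf("doorbell=%d seen=%d\n", doorbell, seen);
	return 0;
}
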
arch/x86/kvm/svm/svm.h  +3 −1
@@ -499,6 +499,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
 			  int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+				     int trig_mode, int vec);
 
 /* nested.c */
 
@@ -582,12 +584,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
 void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
 void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
 bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
 int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
 		       uint32_t guest_irq, bool set);
 void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
 void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
 
 /* sev.c */