Commit e6290032 authored by Marc Zyngier
Browse files

Merge branch 'kvm-arm64/vlpi-save-restore' into kvmarm-master/next



Signed-off-by: Marc Zyngier <maz@kernel.org>
parents c90aad55 8082d50f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -80,7 +80,7 @@ KVM_DEV_ARM_VGIC_GRP_CTRL
    -EFAULT  Invalid guest ram access
    -EBUSY   One or more VCPUS are running
    -EACCES  The virtual ITS is backed by a physical GICv4 ITS, and the
	     state is not available
	     state is not available without GICv4.1
    =======  ==========================================================

KVM_DEV_ARM_VGIC_GRP_ITS_REGS
+3 −3
Original line number Diff line number Diff line
@@ -2218,10 +2218,10 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
		/*
		 * If an LPI carries the HW bit, this means that this
		 * interrupt is controlled by GICv4, and we do not
		 * have direct access to that state. Let's simply fail
		 * the save operation...
		 * have direct access to that state without GICv4.1.
		 * Let's simply fail the save operation...
		 */
		if (ite->irq->hw)
		if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
			return -EACCES;

		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
+60 −6
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
@@ -356,6 +358,32 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
	return 0;
}

/*
 * Deactivating a vPE's doorbell interrupt makes the core code
 * unmap the vPE, which in turn invalidates any cached VPT data.
 */
static void unmap_all_vpes(struct vgic_dist *dist)
{
	int idx;

	for (idx = 0; idx < dist->its_vm.nr_vpes; idx++) {
		struct irq_desc *desc = irq_to_desc(dist->its_vm.vpes[idx]->irq);

		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

/*
 * Re-activate each vPE's doorbell interrupt, mapping the vPEs back
 * after a prior unmap_all_vpes() pass.
 */
static void map_all_vpes(struct vgic_dist *dist)
{
	int idx;

	for (idx = 0; idx < dist->its_vm.nr_vpes; idx++) {
		struct irq_desc *desc = irq_to_desc(dist->its_vm.vpes[idx]->irq);

		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * kvm lock and all vcpu lock must be held
@@ -365,13 +393,28 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	int ret;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * A preparation for getting any VLPI states.
	 * The above vgic initialized check also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
@@ -387,24 +430,35 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				return ret;
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);
		if (stored == irq->pending_latch)

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (irq->pending_latch)
		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
			goto out;
	}
	return 0;

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}

/**
+38 −0
Original line number Diff line number Diff line
@@ -203,6 +203,25 @@ void vgic_v4_configure_vsgis(struct kvm *kvm)
	kvm_arm_resume_guest(kvm);
}

/*
 * Read a VLPI's pending state straight out of the vPE's VPT.
 *
 * Must be called with GICv4.1 and with the vPE unmapped: unmapping
 * guarantees that any VPT caches associated with the vPE have been
 * invalidated, so the memory-resident VPT is authoritative.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	u8 *vpt = page_address(vpe->vpt_page);
	unsigned int byte_idx = irq->intid / BITS_PER_BYTE;
	int mask = BIT(irq->intid % BITS_PER_BYTE);

	*val = !!(vpt[byte_idx] & mask);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
@@ -385,6 +404,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
@@ -430,6 +450,24 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
	irq->host_irq	= virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
+1 −0
Original line number Diff line number Diff line
@@ -318,5 +318,6 @@ bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);

#endif
Loading