Commit 9eb939d9 authored by Zenghui Yu's avatar Zenghui Yu Committed by Dongxu Sun
Browse files

KVM: arm64: GICv4.1: Add direct injection capability to PPI registers

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8K89F


CVE: NA

------------------------------------------------------------------

Most of the GICv3 emulation code that deals with timer interrupts now
has to be aware of the HiSilicon direct vtimer capabilities in order
to benefit from them.

Add such support, keyed on the interrupt having the non-NULL vtimer_info.
Except for the uaccess save path for ISPENDR0 and ISACTIVER0 registers,
I haven't taken too much care of the userspace save/restore path yet...

Signed-off-by: default avatarZenghui Yu <yuzenghui@huawei.com>
Signed-off-by: default avatarwanghaibin <wanghaibin.wang@huawei.com>
Signed-off-by: default avatarKunkun Jiang <jiangkunkun@huawei.com>
Signed-off-by: default avatarDongxu Sun <sundongxu3@huawei.com>
parent c3680403
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -326,7 +326,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		bool state = irq->pending_latch;
		bool state = irq->pending_latch;


		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			int err;
			int err;


			err = irq_get_irqchip_state(irq->host_irq,
			err = irq_get_irqchip_state(irq->host_irq,
+16 −8
Original line number Original line Diff line number Diff line
@@ -78,7 +78,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			vgic_update_vsgi(irq);
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
		} else {
@@ -125,7 +125,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			if (!irq->enabled) {
			if (!irq->enabled) {
				struct irq_data *data;
				struct irq_data *data;


@@ -174,7 +174,7 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
		if (vgic_direct_sgi_or_ppi(irq) && irq->enabled)
			disable_irq_nosync(irq->host_irq);
			disable_irq_nosync(irq->host_irq);


		irq->enabled = false;
		irq->enabled = false;
@@ -241,7 +241,7 @@ static unsigned long __read_pending(struct kvm_vcpu *vcpu,
		bool val;
		bool val;


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			int err;
			int err;


			val = false;
			val = false;
@@ -301,7 +301,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);


		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			/* HW SGI? Ask the GIC to inject it */
			/* HW SGI? Ask the GIC to inject it */
			int err;
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
			err = irq_set_irqchip_state(irq->host_irq,
@@ -394,7 +394,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,


		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);


		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		if (vgic_direct_sgi_or_ppi(irq)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;
			int err;
			err = irq_set_irqchip_state(irq->host_irq,
			err = irq_set_irqchip_state(irq->host_irq,
@@ -488,12 +488,17 @@ static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
	/* Loop over all IRQs affected by this read */
	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		struct vtimer_info *vtimer = irq->vtimer_info;
		bool state = irq->active;

		if (vtimer)
			state = vtimer->get_active_stat(vcpu, irq->intid);


		/*
		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 * all the guest is interested in is the virtual state.
		 */
		 */
		if (irq->active)
		if (state)
			value |= (1U << i);
			value |= (1U << i);


		vgic_put_irq(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
@@ -553,6 +558,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
		 * do here.
		 * do here.
		 */
		 */
		irq->active = false;
		irq->active = false;
	} else if (irq->vtimer_info) {
		/* MMIO trap only */
		irq->vtimer_info->set_active_stat(vcpu, irq->intid, active);
	} else {
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;
		u8 active_source;
@@ -696,7 +704,7 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
		if (vgic_direct_sgi_or_ppi(irq))
			vgic_update_vsgi(irq);
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);


+10 −0
Original line number Original line Diff line number Diff line
@@ -126,6 +126,16 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
	return vgic_irq_get_lr_count(irq) > 1;
	return vgic_irq_get_lr_count(irq) > 1;
}
}


/*
 * True when this IRQ is injected directly by hardware: either a
 * GICv4.1 directly-injected HW SGI (irq->hw set and the intid is in
 * the SGI range), or a PPI backed by the HiSilicon direct vtimer
 * (non-NULL vtimer_info).
 */
static inline bool vgic_direct_sgi_or_ppi(struct vgic_irq *irq)
{
	if (irq->hw && vgic_irq_is_sgi(irq->intid))
		return true;

	return irq->vtimer_info != NULL;
}

/*
/*
 * This struct provides an intermediate representation of the fields contained
 * This struct provides an intermediate representation of the fields contained
 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
 * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC