Commit 9e35276a authored by Jason Wang's avatar Jason Wang Committed by Michael S. Tsirkin
Browse files

virtio_pci: harden MSI-X interrupts



We used to synchronize pending MSI-X irq handlers via
synchronize_irq(), but this may not work for an untrusted device that
may keep sending interrupts after reset, which may lead to unexpected
results. Similarly, we should not enable MSI-X interrupts until the
device is ready. So this patch fixes those two issues by:

1) switching to disable_irq() to prevent the virtio interrupt
   handlers from being called after the device is reset.
2) using IRQF_NO_AUTOEN and enabling the MSI-X irq during .ready()

This makes sure the virtio interrupt handler won't be called before
virtio_device_ready() or after reset.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20211019070152.8236-5-jasowang@redhat.com


Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent d50497eb
Loading
Loading
Loading
Loading
+21 −6
Original line number Diff line number Diff line
@@ -24,8 +24,8 @@ MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
/* disable irq handlers */
void vp_disable_cbs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;
@@ -34,7 +34,20 @@ void vp_synchronize_vectors(struct virtio_device *vdev)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
		disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* enable irq handlers */
void vp_enable_cbs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		return;

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
@@ -141,7 +154,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_config_changed, IRQF_NO_AUTOEN,
			  vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
@@ -160,7 +174,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_vring_interrupt, IRQF_NO_AUTOEN,
				  vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
@@ -337,7 +352,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vring_interrupt, IRQF_NO_AUTOEN,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
+4 −2
Original line number Diff line number Diff line
@@ -101,8 +101,10 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
	return container_of(vdev, struct virtio_pci_device, vdev);
}

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* disable irq handlers */
void vp_disable_cbs(struct virtio_device *vdev);
/* enable irq handlers */
void vp_enable_cbs(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
+3 −2
Original line number Diff line number Diff line
@@ -98,8 +98,8 @@ static void vp_reset(struct virtio_device *vdev)
	/* Flush out the status write, and flush in device writes,
	 * including MSi-X interrupts, if any. */
	vp_legacy_get_status(&vp_dev->ldev);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
	/* Disable VQ/configuration callbacks. */
	vp_disable_cbs(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -185,6 +185,7 @@ static void del_vq(struct virtio_pci_vq_info *info)
}

static const struct virtio_config_ops virtio_pci_config_ops = {
	.enable_cbs	= vp_enable_cbs,
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
+4 −2
Original line number Diff line number Diff line
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
	 */
	while (vp_modern_get_status(mdev))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
	/* Disable VQ/configuration callbacks. */
	vp_disable_cbs(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -380,6 +380,7 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
}

static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.enable_cbs	= vp_enable_cbs,
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
@@ -397,6 +398,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.enable_cbs	= vp_enable_cbs,
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,