Unverified commit c78bd94b, authored by openeuler-ci-bot, committed by Gitee
Browse files

!6178 CVE-2024-26812

Merge Pull Request from: @ci-robot 
 
PR sync from: Jinjie Ruan <ruanjinjie@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/PXOG7YHBX53REU4QJSWWLK6CHEITQMXH/ 
CVE-2024-26812

Alex Williamson (2):
  vfio: Introduce interface to flush virqfd inject workqueue
  vfio/pci: Create persistent INTx handler


-- 
2.34.1
 
https://gitee.com/src-openeuler/kernel/issues/I9E6TE 
 
Link: https://gitee.com/openeuler/kernel/pulls/6178

 

Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents bf2976e5 5b58d1ef
Loading
Loading
Loading
Loading
+82 −67
Original line number Diff line number Diff line
@@ -29,8 +29,13 @@ static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	if (likely(is_intx(vdev) && !vdev->virq_disabled))
		eventfd_signal(vdev->ctx[0].trigger, 1);
	if (likely(is_intx(vdev) && !vdev->virq_disabled)) {
		struct eventfd_ctx *trigger;

		trigger = READ_ONCE(vdev->ctx[0].trigger);
		if (likely(trigger))
			eventfd_signal(trigger, 1);
	}
}

static void __vfio_pci_intx_mask(struct vfio_pci_device *vdev)
@@ -157,98 +162,104 @@ static irqreturn_t vfio_intx_handler(int irq, void *dev_id)
	return ret;
}

static int vfio_intx_enable(struct vfio_pci_device *vdev)
static int vfio_intx_enable(struct vfio_pci_device *vdev,
			    struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags;
	char *name;
	int ret;

	if (!is_irq_none(vdev))
		return -EINVAL;

	if (!vdev->pdev->irq)
	if (!pdev->irq)
		return -ENODEV;

	name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
	if (!vdev->ctx)
		return -ENOMEM;

	vdev->num_ctx = 1;

	vdev->ctx[0].name = name;
	vdev->ctx[0].trigger = trigger;

	/*
	 * If the virtual interrupt is masked, restore it.  Devices
	 * supporting DisINTx can be masked at the hardware level
	 * here, non-PCI-2.3 devices will have to wait until the
	 * interrupt is enabled.
	 * Fill the initial masked state based on virq_disabled.  After
	 * enable, changing the DisINTx bit in vconfig directly changes INTx
	 * masking.  igate prevents races during setup, once running masked
	 * is protected via irqlock.
	 *
	 * Devices supporting DisINTx also reflect the current mask state in
	 * the physical DisINTx bit, which is not affected during IRQ setup.
	 *
	 * Devices without DisINTx support require an exclusive interrupt.
	 * IRQ masking is performed at the IRQ chip.  Again, igate protects
	 * against races during setup and IRQ handlers and irqfds are not
	 * yet active, therefore masked is stable and can be used to
	 * conditionally auto-enable the IRQ.
	 *
	 * irq_type must be stable while the IRQ handler is registered,
	 * therefore it must be set before request_irq().
	 */
	vdev->ctx[0].masked = vdev->virq_disabled;
	if (vdev->pci_2_3)
		pci_intx(vdev->pdev, !vdev->ctx[0].masked);
	if (vdev->pci_2_3) {
		pci_intx(pdev, !vdev->ctx[0].masked);
		irqflags = IRQF_SHARED;
	} else {
		irqflags = vdev->ctx[0].masked ? IRQF_NO_AUTOEN : 0;
	}

	vdev->irq_type = VFIO_PCI_INTX_IRQ_INDEX;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->irq_type = VFIO_PCI_NUM_IRQS;
		kfree(name);
		vdev->num_ctx = 0;
		kfree(vdev->ctx);
		return ret;
	}

	return 0;
}

static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
static int vfio_intx_set_signal(struct vfio_pci_device *vdev,
				struct eventfd_ctx *trigger)
{
	struct pci_dev *pdev = vdev->pdev;
	unsigned long irqflags = IRQF_SHARED;
	struct eventfd_ctx *trigger;
	unsigned long flags;
	int ret;
	struct eventfd_ctx *old;

	if (vdev->ctx[0].trigger) {
		free_irq(pdev->irq, vdev);
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(vdev->ctx[0].trigger);
		vdev->ctx[0].trigger = NULL;
	}
	old = vdev->ctx[0].trigger;

	if (fd < 0) /* Disable only */
		return 0;

	vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)",
				      pci_name(pdev));
	if (!vdev->ctx[0].name)
		return -ENOMEM;
	WRITE_ONCE(vdev->ctx[0].trigger, trigger);

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(vdev->ctx[0].name);
		return PTR_ERR(trigger);
	/* Releasing an old ctx requires synchronizing in-flight users */
	if (old) {
		synchronize_irq(pdev->irq);
		vfio_virqfd_flush_thread(&vdev->ctx[0].unmask);
		eventfd_ctx_put(old);
	}

	vdev->ctx[0].trigger = trigger;

	/*
	 * Devices without DisINTx support require an exclusive interrupt,
	 * IRQ masking is performed at the IRQ chip.  The masked status is
	 * protected by vdev->irqlock. Setup the IRQ without auto-enable and
	 * unmask as necessary below under lock.  DisINTx is unmodified by
	 * the IRQ configuration and may therefore use auto-enable.
	 */
	if (!vdev->pci_2_3)
		irqflags = IRQF_NO_AUTOEN;

	ret = request_irq(pdev->irq, vfio_intx_handler,
			  irqflags, vdev->ctx[0].name, vdev);
	if (ret) {
		vdev->ctx[0].trigger = NULL;
		kfree(vdev->ctx[0].name);
		eventfd_ctx_put(trigger);
		return ret;
	}

	spin_lock_irqsave(&vdev->irqlock, flags);
	if (!vdev->pci_2_3 && !vdev->ctx[0].masked)
		enable_irq(pdev->irq);
	spin_unlock_irqrestore(&vdev->irqlock, flags);

	return 0;
}

static void vfio_intx_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;

	vfio_virqfd_disable(&vdev->ctx[0].unmask);
	vfio_virqfd_disable(&vdev->ctx[0].mask);
	vfio_intx_set_signal(vdev, -1);
	free_irq(pdev->irq, vdev);
	if (vdev->ctx[0].trigger)
		eventfd_ctx_put(vdev->ctx[0].trigger);
	kfree(vdev->ctx[0].name);
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	vdev->num_ctx = 0;
	kfree(vdev->ctx);
@@ -498,19 +509,23 @@ static int vfio_pci_set_intx_trigger(struct vfio_pci_device *vdev,
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		struct eventfd_ctx *trigger = NULL;
		int32_t fd = *(int32_t *)data;
		int ret;

		if (is_intx(vdev))
			return vfio_intx_set_signal(vdev, fd);
		if (fd >= 0) {
			trigger = eventfd_ctx_fdget(fd);
			if (IS_ERR(trigger))
				return PTR_ERR(trigger);
		}

		ret = vfio_intx_enable(vdev);
		if (ret)
			return ret;
		if (is_intx(vdev))
			ret = vfio_intx_set_signal(vdev, trigger);
		else
			ret = vfio_intx_enable(vdev, trigger);

		ret = vfio_intx_set_signal(vdev, fd);
		if (ret)
			vfio_intx_disable(vdev);
		if (ret && trigger)
			eventfd_ctx_put(trigger);

		return ret;
	}
+21 −0
Original line number Diff line number Diff line
@@ -101,6 +101,13 @@ static void virqfd_inject(struct work_struct *work)
		virqfd->thread(virqfd->opaque, virqfd->data);
}

/*
 * Work callback bound to virqfd->flush_inject (see INIT_WORK in
 * vfio_virqfd_enable).  Running on the cleanup workqueue, it waits for
 * any pending/executing inject work on the same virqfd to finish, which
 * lets a caller that flushes the workqueue synchronize against in-flight
 * injections.
 */
static void virqfd_flush_inject(struct work_struct *work)
{
	struct virqfd *virqfd = container_of(work, struct virqfd, flush_inject);

	flush_work(&virqfd->inject);
}

int vfio_virqfd_enable(void *opaque,
		       int (*handler)(void *, void *),
		       void (*thread)(void *, void *),
@@ -124,6 +131,7 @@ int vfio_virqfd_enable(void *opaque,

	INIT_WORK(&virqfd->shutdown, virqfd_shutdown);
	INIT_WORK(&virqfd->inject, virqfd_inject);
	INIT_WORK(&virqfd->flush_inject, virqfd_flush_inject);

	irqfd = fdget(fd);
	if (!irqfd.file) {
@@ -214,6 +222,19 @@ void vfio_virqfd_disable(struct virqfd **pvirqfd)
}
EXPORT_SYMBOL_GPL(vfio_virqfd_disable);

/*
 * Flush any pending virqfd thread-callback work for @pvirqfd.
 *
 * Under virqfd_lock (so the virqfd cannot be torn down while we look at
 * it), queue the flush_inject work item onto the cleanup workqueue if
 * this virqfd has a thread callback registered; that work item in turn
 * waits on the inject work (see virqfd_flush_inject).  The lock is
 * dropped before flushing, since flush_workqueue() may sleep.  Flushing
 * the whole cleanup workqueue then guarantees that, on return, no
 * inject work queued before this call is still running — the property
 * vfio_intx_set_signal relies on before releasing the old eventfd ctx
 * (CVE-2024-26812 use-after-free fix).
 */
void vfio_virqfd_flush_thread(struct virqfd **pvirqfd)
{
	unsigned long flags;

	spin_lock_irqsave(&virqfd_lock, flags);
	if (*pvirqfd && (*pvirqfd)->thread)
		queue_work(vfio_irqfd_cleanup_wq, &(*pvirqfd)->flush_inject);
	spin_unlock_irqrestore(&virqfd_lock, flags);

	flush_workqueue(vfio_irqfd_cleanup_wq);
}
EXPORT_SYMBOL_GPL(vfio_virqfd_flush_thread);

module_init(vfio_virqfd_init);
module_exit(vfio_virqfd_exit);

+2 −0
Original line number Diff line number Diff line
@@ -225,6 +225,7 @@ struct virqfd {
	wait_queue_entry_t		wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct work_struct	flush_inject;
	struct virqfd		**pvirqfd;
};

@@ -233,6 +234,7 @@ extern int vfio_virqfd_enable(void *opaque,
			      void (*thread)(void *, void *),
			      void *data, struct virqfd **pvirqfd, int fd);
extern void vfio_virqfd_disable(struct virqfd **pvirqfd);
void vfio_virqfd_flush_thread(struct virqfd **pvirqfd);

extern int vfio_pci_num_regions(void *device_data);
extern struct pci_dev *vfio_pci_pdev(void *device_data);