Unverified commit cf9c77ec, authored by openeuler-ci-bot, committed by Gitee
Browse files

!6458 CVE-2024-26813

Merge Pull Request from: @ci-robot 
 
PR sync from: Liu Mingrui <liumingrui@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/ZJJP57SVLUWAHYLR2C3VZUM2YOHJPDZ2/ 
CVE-2024-26813

Alex Williamson (2):
  vfio/platform: Disable virqfds on cleanup
  vfio/platform: Create persistent IRQ handlers


-- 
2.25.1
 
https://gitee.com/src-openeuler/kernel/issues/I9E6TQ 
 
Link: https://gitee.com/openeuler/kernel/pulls/6458

 

Reviewed-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 1507cf0f 8bc2e96f
Loading
Loading
Loading
Loading
+72 −34
Original line number Diff line number Diff line
@@ -136,6 +136,16 @@ static int vfio_platform_set_irq_unmask(struct vfio_platform_device *vdev,
	return 0;
}

/*
 * Signal the userspace eventfd bound to this IRQ context, if one is set.
 *
 * The trigger eventfd is guaranteed valid in the interrupt path
 * and protected by the igate mutex when triggered via ioctl.
 */
static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
{
	/* trigger may be NULL between SET_IRQS disable and re-enable */
	if (likely(irq_ctx->trigger))
		eventfd_signal(irq_ctx->trigger, 1);
}

static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;
@@ -155,7 +165,7 @@ static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
	spin_unlock_irqrestore(&irq_ctx->lock, flags);

	if (ret == IRQ_HANDLED)
		eventfd_signal(irq_ctx->trigger, 1);
		vfio_send_eventfd(irq_ctx);

	return ret;
}
@@ -164,22 +174,19 @@ static irqreturn_t vfio_irq_handler(int irq, void *dev_id)
{
	struct vfio_platform_irq *irq_ctx = dev_id;

	eventfd_signal(irq_ctx->trigger, 1);
	vfio_send_eventfd(irq_ctx);

	return IRQ_HANDLED;
}

static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd, irq_handler_t handler)
			    int fd)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;
	int ret;

	if (irq->trigger) {
		irq_clear_status_flags(irq->hwirq, IRQ_NOAUTOEN);
		free_irq(irq->hwirq, irq);
		kfree(irq->name);
		disable_irq(irq->hwirq);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}
@@ -187,29 +194,19 @@ static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
	if (fd < 0) /* Disable only */
		return 0;

	irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
						irq->hwirq, vdev->name);
	if (!irq->name)
		return -ENOMEM;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger)) {
		kfree(irq->name);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);
	}

	irq->trigger = trigger;

	irq_set_status_flags(irq->hwirq, IRQ_NOAUTOEN);
	ret = request_irq(irq->hwirq, handler, 0, irq->name, irq);
	if (ret) {
		kfree(irq->name);
		eventfd_ctx_put(trigger);
		irq->trigger = NULL;
		return ret;
	}

	if (!irq->masked)
	/*
	 * irq->masked effectively provides nested disables within the overall
	 * enable relative to trigger.  Specifically request_irq() is called
	 * with NO_AUTOEN, therefore the IRQ is initially disabled.  The user
	 * may only further disable the IRQ with a MASK operations because
	 * irq->masked is initially false.
	 */
	enable_irq(irq->hwirq);

	return 0;
@@ -229,7 +226,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
		handler = vfio_irq_handler;

	if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
		return vfio_set_trigger(vdev, index, -1, handler);
		return vfio_set_trigger(vdev, index, -1);

	if (start != 0 || count != 1)
		return -EINVAL;
@@ -237,7 +234,7 @@ static int vfio_platform_set_irq_trigger(struct vfio_platform_device *vdev,
	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd = *(int32_t *)data;

		return vfio_set_trigger(vdev, index, fd, handler);
		return vfio_set_trigger(vdev, index, fd);
	}

	if (flags & VFIO_IRQ_SET_DATA_NONE) {
@@ -261,6 +258,14 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,
		    unsigned start, unsigned count, uint32_t flags,
		    void *data) = NULL;

	/*
	 * For compatibility, errors from request_irq() are local to the
	 * SET_IRQS path and reflected in the name pointer.  This allows,
	 * for example, polling mode fallback for an exclusive IRQ failure.
	 */
	if (IS_ERR(vdev->irqs[index].name))
		return PTR_ERR(vdev->irqs[index].name);

	switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
	case VFIO_IRQ_SET_ACTION_MASK:
		func = vfio_platform_set_irq_mask;
@@ -281,7 +286,7 @@ int vfio_platform_set_irqs_ioctl(struct vfio_platform_device *vdev,

int vfio_platform_irq_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;
	int cnt = 0, i, ret = 0;

	while (vdev->get_irq(vdev, cnt) >= 0)
		cnt++;
@@ -292,37 +297,70 @@ int vfio_platform_irq_init(struct vfio_platform_device *vdev)

	for (i = 0; i < cnt; i++) {
		int hwirq = vdev->get_irq(vdev, i);
		irq_handler_t handler = vfio_irq_handler;

		if (hwirq < 0)
		if (hwirq < 0) {
			ret = -EINVAL;
			goto err;
		}

		spin_lock_init(&vdev->irqs[i].lock);

		vdev->irqs[i].flags = VFIO_IRQ_INFO_EVENTFD;

		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK)
		if (irq_get_trigger_type(hwirq) & IRQ_TYPE_LEVEL_MASK) {
			vdev->irqs[i].flags |= VFIO_IRQ_INFO_MASKABLE
						| VFIO_IRQ_INFO_AUTOMASKED;
			handler = vfio_automasked_irq_handler;
		}

		vdev->irqs[i].count = 1;
		vdev->irqs[i].hwirq = hwirq;
		vdev->irqs[i].masked = false;
		vdev->irqs[i].name = kasprintf(GFP_KERNEL,
					       "vfio-irq[%d](%s)", hwirq,
					       vdev->name);
		if (!vdev->irqs[i].name) {
			ret = -ENOMEM;
			goto err;
		}

		ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
				  vdev->irqs[i].name, &vdev->irqs[i]);
		if (ret) {
			kfree(vdev->irqs[i].name);
			vdev->irqs[i].name = ERR_PTR(ret);
		}
	}

	vdev->num_irqs = cnt;

	return 0;
err:
	for (--i; i >= 0; i--) {
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			kfree(vdev->irqs[i].name);
		}
	}
	kfree(vdev->irqs);
	return -EINVAL;
	return ret;
}

void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_irqs; i++)
		vfio_set_trigger(vdev, i, -1, NULL);
	for (i = 0; i < vdev->num_irqs; i++) {
		vfio_virqfd_disable(&vdev->irqs[i].mask);
		vfio_virqfd_disable(&vdev->irqs[i].unmask);
		if (!IS_ERR(vdev->irqs[i].name)) {
			free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
			if (vdev->irqs[i].trigger)
				eventfd_ctx_put(vdev->irqs[i].trigger);
			kfree(vdev->irqs[i].name);
		}
	}

	vdev->num_irqs = 0;
	kfree(vdev->irqs);