Commit e7a1d5ef authored by Kunkun Jiang, committed by Zheng Zengkai
Browse files

Revert "vfio/pci: Inject page response upon response region fill"

virt inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I61SPO


CVE: NA

--------------------------------

This reverts commit 9b4742a6.

Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent abdedd4b
Loading
Loading
Loading
Loading
+0 −40
Original line number Diff line number Diff line
@@ -607,32 +607,6 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev)
	return ret;
}

/*
 * Workqueue callback: drain the userspace-filled page-response ring and
 * forward each entry to the IOMMU layer via iommu_page_response().
 * The ring indices live in the shared region header; the consumer index
 * (tail) is advanced under fault_response_queue_lock so the producer in
 * vfio_pci_dma_fault_response_rw() observes consistent state.
 */
static void dma_response_inject(struct work_struct *work)
{
	struct vfio_pci_dma_fault_response_work *rw =
		container_of(work, struct vfio_pci_dma_fault_response_work, inject);
	struct vfio_pci_device *vdev = rw->vdev;
	struct vfio_region_dma_fault_response *hdr = rw->header;
	u32 cons, prod, nb_entries;

	mutex_lock(&vdev->fault_response_queue_lock);

	cons = hdr->tail;
	prod = hdr->head;
	nb_entries = hdr->nb_entries;

	/* Consume every response queued between cons and prod. */
	for (; CIRC_CNT(prod, cons, nb_entries) >= 1;
	     cons = (cons + 1) % nb_entries) {
		struct iommu_page_response *resp;

		resp = (struct iommu_page_response *)
			(vdev->fault_response_pages + hdr->offset +
			 cons * hdr->entry_size);

		/* TODO: properly handle the return value */
		iommu_page_response(&vdev->pdev->dev, resp);
		/* Publish the new consumer index as each entry is handled. */
		hdr->tail = (cons + 1) % nb_entries;
	}
	mutex_unlock(&vdev->fault_response_queue_lock);
}

#define DMA_FAULT_RESPONSE_RING_LENGTH 512

static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
@@ -678,22 +652,8 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
	header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH;
	header->offset = PAGE_SIZE;

	vdev->response_work = kzalloc(sizeof(*vdev->response_work), GFP_KERNEL);
	if (!vdev->response_work)
		goto out;
	vdev->response_work->header = header;
	vdev->response_work->vdev = vdev;

	/* launch the thread that will extract the response */
	INIT_WORK(&vdev->response_work->inject, dma_response_inject);
	vdev->dma_fault_response_wq =
		create_singlethread_workqueue("vfio-dma-fault-response");
	if (!vdev->dma_fault_response_wq)
		return -ENOMEM;

	return 0;
out:
	kfree(vdev->fault_response_pages);
	vdev->fault_response_pages = NULL;
	return ret;
}
+0 −7
Original line number Diff line number Diff line
@@ -52,12 +52,6 @@ struct vfio_pci_irq_ctx {
	struct irq_bypass_producer	producer;
};

/*
 * Deferred-work context for injecting userspace-written page responses
 * into the IOMMU layer (handled by dma_response_inject()).
 */
struct vfio_pci_dma_fault_response_work {
	struct work_struct inject;	/* queued on vdev->dma_fault_response_wq */
	struct vfio_region_dma_fault_response *header;	/* shared response ring header */
	struct vfio_pci_device *vdev;	/* owning device */
};

struct vfio_pci_device;
struct vfio_pci_region;

@@ -159,7 +153,6 @@ struct vfio_pci_device {
	u8			*fault_pages;
	u8			*fault_response_pages;
	struct workqueue_struct *dma_fault_response_wq;
	struct vfio_pci_dma_fault_response_work *response_work;
	struct mutex		fault_queue_lock;
	struct mutex		fault_response_queue_lock;
	struct list_head	dummy_resources_list;
+0 −1
Original line number Diff line number Diff line
@@ -440,7 +440,6 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user
		mutex_lock(&vdev->fault_response_queue_lock);
		header->head = new_head;
		mutex_unlock(&vdev->fault_response_queue_lock);
		queue_work(vdev->dma_fault_response_wq, &vdev->response_work->inject);
	} else {
		if (copy_to_user(buf, base + pos, count))
			return -EFAULT;