Commit 9b4742a6 authored by Eric Auger, committed by Zheng Zengkai

vfio/pci: Inject page response upon response region fill

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I401IF


CVE: NA

------------------------------

When userspace increments the head of the page response
ring buffer, let's push the response into the IOMMU layer.
This is done through a workqueue that pops the responses
from the ring buffer and increments the tail.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent cbbf4b3a
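
The diff below contains only the kernel consumer side of this protocol. For orientation, here is a hypothetical sketch of the userspace producer it implies; the header field offset, the assumption that the ring area (header->offset into the region) is mmap'ed, and the name push_page_response() are assumptions for illustration, not part of this commit:

/*
 * Hypothetical userspace producer for the page response ring, a minimal
 * sketch only. RESP_HEAD_FIELD_OFFSET and the mmap'ed ring area are
 * assumptions, not a verified ABI; error handling is omitted.
 */
#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#define RESP_HEAD_FIELD_OFFSET	12	/* assumed offset of 'head' in the header */

static void push_page_response(int device_fd, off_t region_offset,
			       uint8_t *ring_base, uint32_t *head,
			       uint32_t nb_entries, uint32_t entry_size,
			       const void *resp)
{
	/* Fill the slot currently indexed by head. */
	memcpy(ring_base + (size_t)*head * entry_size, resp, entry_size);

	/* Publish the new head; the kernel pops entries and bumps tail. */
	*head = (*head + 1) % nb_entries;
	pwrite(device_fd, head, sizeof(*head),
	       region_offset + RESP_HEAD_FIELD_OFFSET);
}

Writing the head field through the region is what lands in vfio_pci_dma_fault_response_rw() below and queues the injection work.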
drivers/vfio/pci/vfio_pci.c +40 −0
@@ -552,6 +552,32 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev)
	return ret;
}

static void dma_response_inject(struct work_struct *work)
{
	struct vfio_pci_dma_fault_response_work *rwork =
		container_of(work, struct vfio_pci_dma_fault_response_work, inject);
	struct vfio_region_dma_fault_response *header = rwork->header;
	struct vfio_pci_device *vdev = rwork->vdev;
	struct iommu_page_response *resp;
	u32 tail, head, size;

	mutex_lock(&vdev->fault_response_queue_lock);

	tail = header->tail;
	head = header->head;
	size = header->nb_entries;

	while (CIRC_CNT(head, tail, size) >= 1) {
		resp = (struct iommu_page_response *)(vdev->fault_response_pages + header->offset +
						tail * header->entry_size);

		/* TODO: properly handle the return value */
		iommu_page_response(&vdev->pdev->dev, resp);
		header->tail = tail = (tail + 1) % size;
	}
	mutex_unlock(&vdev->fault_response_queue_lock);
}

#define DMA_FAULT_RESPONSE_RING_LENGTH 512

static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
@@ -597,8 +623,22 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
	header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH;
	header->offset = PAGE_SIZE;

	vdev->response_work = kzalloc(sizeof(*vdev->response_work), GFP_KERNEL);
	if (!vdev->response_work) {
		ret = -ENOMEM;
		goto out;
	}
	vdev->response_work->header = header;
	vdev->response_work->vdev = vdev;

	/* launch the workqueue that will extract the responses */
	INIT_WORK(&vdev->response_work->inject, dma_response_inject);
	vdev->dma_fault_response_wq =
		create_singlethread_workqueue("vfio-dma-fault-response");
	if (!vdev->dma_fault_response_wq) {
		ret = -ENOMEM;
		goto out_free_work;
	}

	return 0;
out_free_work:
	kfree(vdev->response_work);
	vdev->response_work = NULL;
out:
	kfree(vdev->fault_response_pages);
	vdev->fault_response_pages = NULL;
	return ret;
}
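
For reference, the CIRC_CNT() macro used by dma_response_inject() is the stock helper from include/linux/circ_buf.h; it assumes the ring size is a power of two, which DMA_FAULT_RESPONSE_RING_LENGTH (512) satisfies:

#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))

With size = 512, head = 5 and tail = 2, CIRC_CNT() reports 3 pending responses; head == tail means the ring is empty, so at most size - 1 entries can be in flight at once.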
drivers/vfio/pci/vfio_pci_private.h +7 −0
@@ -52,6 +52,12 @@ struct vfio_pci_irq_ctx {
	struct irq_bypass_producer	producer;
};

struct vfio_pci_dma_fault_response_work {
	struct work_struct inject;
	struct vfio_region_dma_fault_response *header;
	struct vfio_pci_device *vdev;
};

struct vfio_pci_device;
struct vfio_pci_region;

@@ -146,6 +152,7 @@ struct vfio_pci_device {
	u8			*fault_pages;
	u8			*fault_response_pages;
	struct workqueue_struct *dma_fault_response_wq;
	struct vfio_pci_dma_fault_response_work *response_work;
	struct mutex		fault_queue_lock;
	struct mutex		fault_response_queue_lock;
	struct list_head	dummy_resources_list;
drivers/vfio/pci/vfio_pci_rdwr.c +1 −0
@@ -430,6 +430,7 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user
		mutex_lock(&vdev->fault_response_queue_lock);
		header->head = new_head;
		mutex_unlock(&vdev->fault_response_queue_lock);
		queue_work(vdev->dma_fault_response_wq, &vdev->response_work->inject);
	} else {
		if (copy_to_user(buf, base + pos, count))
			return -EFAULT;
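
The else branch above serves reads of the region, which is how userspace would discover the ring geometry before producing responses. A hypothetical probe, assuming the header layout mirrors struct vfio_region_dma_fault_response from this series (the field order is an assumption, consistent with RESP_HEAD_FIELD_OFFSET in the earlier sketch):

/*
 * Hypothetical userspace mirror of the response region header; the exact
 * field order is an assumption, not a verified ABI.
 */
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

struct dma_fault_response_header {
	uint32_t entry_size;	/* bytes per response entry */
	uint32_t nb_entries;	/* ring length (512 in this patch) */
	uint32_t offset;	/* ring offset within the region (PAGE_SIZE) */
	uint32_t head;		/* advanced by userspace */
	uint32_t tail;		/* advanced by the kernel workqueue */
};

/* Reads land in the copy_to_user() path of vfio_pci_dma_fault_response_rw(). */
static int read_ring_geometry(int device_fd, off_t region_offset,
			      struct dma_fault_response_header *hdr)
{
	if (pread(device_fd, hdr, sizeof(*hdr), region_offset) != sizeof(*hdr))
		return -1;
	return 0;
}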