Commit 22af48cf authored by Xie Yongji's avatar Xie Yongji Committed by Michael S. Tsirkin
Browse files

vdpa: factor out vhost_vdpa_pa_map() and vhost_vdpa_pa_unmap()



The upcoming patch is going to support VA mapping/unmapping.
So let's factor out the logic of PA mapping/unmapping first
to make the code more readable.

Suggested-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20210831103634.33-10-xieyongji@bytedance.com


Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent c10fb945
Loading
Loading
Loading
Loading
+35 −20
Original line number Diff line number Diff line
@@ -504,7 +504,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
	return r;
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
@@ -526,6 +526,11 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
	}
}

/*
 * Unmap the IOVA range [start, last] from the device IOTLB.
 *
 * All mappings are currently physical-address (PA) based, so this just
 * forwards to vhost_vdpa_pa_unmap(); per the commit message, an upcoming
 * change adds VA-based unmapping behind this dispatch point.
 */
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	/*
	 * Plain call, not "return vhost_vdpa_pa_unmap(...)": returning an
	 * expression from a void function is an ISO C constraint violation
	 * (C11 6.8.6.4p1), even though GCC accepts it as an extension.
	 */
	vhost_vdpa_pa_unmap(v, start, last);
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;
@@ -606,38 +611,28 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
	}
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	u64 start = iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
@@ -651,7 +646,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

@@ -682,7 +677,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iova, csize,
						     PFN_PHYS(map_pfn),
						     msg->perm);
						     perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
@@ -711,7 +706,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,

	/* Pin the rest chunk */
	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), msg->perm);
			     PFN_PHYS(map_pfn), perm);
out:
	if (ret) {
		if (nchunks) {
@@ -730,13 +725,33 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		vhost_vdpa_unmap(v, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;

}

/*
 * Validate an IOTLB update message and install the requested mapping.
 *
 * Checks that the range [msg->iova, msg->iova + msg->size - 1] is
 * non-empty, does not wrap around u64, lies inside the device's
 * advertised IOVA range, and does not overlap an existing IOTLB entry,
 * then delegates the actual pinning and mapping to vhost_vdpa_pa_map().
 *
 * Returns 0 on success, -EINVAL for an empty or out-of-range request,
 * -EEXIST if any part of the range is already mapped, or the error code
 * propagated from vhost_vdpa_pa_map().
 */
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;

	/*
	 * Reject empty requests, ranges whose end would overflow u64
	 * (iova + size - 1 must be representable), and ranges outside
	 * [v->range.first, v->range.last].
	 */
	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	/* Refuse to overwrite any part of an already-mapped range. */
	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,