Commit 729ce5a5 authored by Cai Huoqing, committed by Michael S. Tsirkin
Browse files

vdpa: Make use of PFN_PHYS/PFN_UP/PFN_DOWN helper macro



This is a straightforward refactoring that replaces open-coded page-frame
arithmetic with the PFN_PHYS/PFN_UP/PFN_DOWN helper macros.

Signed-off-by: Cai Huoqing <caihuoqing@baidu.com>
Link: https://lore.kernel.org/r/20210802013717.851-1-caihuoqing@baidu.com


Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 0e115c45
Loading
Loading
Loading
Loading
+12 −12
Original line number Original line Diff line number Diff line
@@ -507,15 +507,15 @@ static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
	unsigned long pfn, pinned;
	unsigned long pfn, pinned;


	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		pinned = PFN_DOWN(map->size);
		for (pfn = map->addr >> PAGE_SHIFT;
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
				set_page_dirty_lock(page);
			unpin_user_page(page);
			unpin_user_page(page);
		}
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
		vhost_iotlb_map_free(iotlb, map);
	}
	}
}
}
@@ -577,7 +577,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
	if (r)
	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);


	return r;
	return r;
}
}
@@ -631,7 +631,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
	if (msg->perm & VHOST_ACCESS_WO)
	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;
		gup_flags |= FOLL_WRITE;


	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	npages = PFN_UP(msg->size + (iova & ~PAGE_MASK));
	if (!npages) {
	if (!npages) {
		ret = -EINVAL;
		ret = -EINVAL;
		goto free;
		goto free;
@@ -639,7 +639,7 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,


	mmap_read_lock(dev->mm);
	mmap_read_lock(dev->mm);


	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		ret = -ENOMEM;
		goto unlock;
		goto unlock;
@@ -673,9 +673,9 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,


			if (last_pfn && (this_pfn != last_pfn + 1)) {
			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Pin a contiguous chunk of memory */
				/* Pin a contiguous chunk of memory */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iova, csize,
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     PFN_PHYS(map_pfn),
						     msg->perm);
						     msg->perm);
				if (ret) {
				if (ret) {
					/*
					/*
@@ -699,13 +699,13 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
			last_pfn = this_pfn;
			last_pfn = this_pfn;
		}
		}


		cur_base += pinned << PAGE_SHIFT;
		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
		npages -= pinned;
	}
	}


	/* Pin the rest chunk */
	/* Pin the rest chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     map_pfn << PAGE_SHIFT, msg->perm);
			     PFN_PHYS(map_pfn), msg->perm);
out:
out:
	if (ret) {
	if (ret) {
		if (nchunks) {
		if (nchunks) {
@@ -945,7 +945,7 @@ static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)


	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;
		return VM_FAULT_SIGBUS;