Unverified Commit a7a8b633 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!2756 Introduce some vdpa ops to support vdpa device live migrate

Merge Pull Request from: @ci-robot 
 
PR sync from: Jiang Dongxu <jiangdongxu1@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/6SF626Y2SGOPGX6NZ5RIYSTLR7T3B2IA/ 
From: jiangdongxu <jiangdongxu1@huawei.com>

Patch 1-17: some bugfixes and ops introduced by upstream
Patch 18-19: introduce vdpa device logging ops
Patch 20-21: introduce vdpa device state ops
Patch 22-23: introduce vdpa device migrate state ops
Patch 24: introduce new vhost feature BYTEMAPLOG
Patch 25: export iommu_get_resv_regions/iommu_set_resv_regions
Patch 26-27: some optimization about vhost-vdpa
Patch 28: add vdpa/vhost-vdpa build config
Patch 29: fix vhost-vdpa compile warnings

Arnaldo Carvalho de Melo (1):
  tools include UAPI: Sync linux/vhost.h with the kernel sources

Cindy Lu (2):
  vhost_vdpa: fix the crash in unmap a large memory

Eugenio Pérez (1):
  vdpa: add get_backend_features vdpa operation

Gautam Dawar (1):
  vhost-vdpa: free iommu domain after last use during cleanup

Greg Kroah-Hartman (1):
  vhost-vdpa: vhost_vdpa_alloc_domain() should be using a const struct
    bus_type *

Jason Gunthorpe (1):
  PCI/IOV: Add pci_iov_vf_id() to get VF index

Sebastien Boeuf (3):
  vdpa: Add resume operation
  vhost-vdpa: Introduce RESUME backend feature bit
  vhost-vdpa: uAPI to resume the device

Shannon Nelson (2):
  vhost_vdpa: tell vqs about the negotiated
  vhost_vdpa: support PACKED when setting-getting vring_base

Shunsuke Mie (1):
  virtio: fix virtio transitional ids

Stefano Garzarella (3):
  vhost-vdpa: fix an iotlb memory leak
  vdpa: add bind_mm/unbind_mm callbacks
  vhost-vdpa: use bind_mm/unbind_mm device callbacks

Zhu Lingshan (1):
  virtio: update virtio id table, add transitional ids

jiangdongxu (12):
  vdpa: add log operations
  vhost-vdpa: add uAPI for logging
  vdpa: add device state operations
  vhost-vdpa: add uAPI for device buffer
  vdpa: add vdpa device migration status ops
  vhost-vdpa: add uAPI for device migration status
  vhost: add VHOST feature VHOST_BACKEND_F_BYTEMAPLOG
  export iommu_get_resv_regions and iommu_set_resv_regions
  vhost-vdpa: Allow transparent MSI IOV
  vhost-vdpa: fix msi irq request err
  arm64: openeuler_defconfig: add VDPA config
  vhost-vdpa: fix compile warnings


-- 
2.27.0
 
https://gitee.com/openeuler/kernel/issues/I86ITO 
 
Link:https://gitee.com/openeuler/kernel/pulls/2756

 

Reviewed-by: default avatarLiu Chao <liuchao173@huawei.com>
Reviewed-by: default avatarKevin Zhu <zhukeqian1@huawei.com>
Reviewed-by: default avatarWeilong Chen <chenweilong@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
Acked-by: default avatarXie XiuQi <xiexiuqi@huawei.com>
parents c93a5cd3 eef74c6a
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -5835,13 +5835,17 @@ CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set
CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
# CONFIG_VDPA is not set
CONFIG_VDPA=m
# CONFIG_IFCVF is not set
# CONFIG_MLX5_VDPA_NET is not set
# CONFIG_VP_VDPA is not set
CONFIG_VHOST_IOTLB=m
CONFIG_VHOST=m
CONFIG_VHOST_MENU=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_SCSI=m
CONFIG_VHOST_VSOCK=m
CONFIG_VHOST_VDPA=m
# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set

#
+2 −0
Original line number Diff line number Diff line
@@ -3040,6 +3040,7 @@ void iommu_get_resv_regions(struct device *dev, struct list_head *list)
	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}
EXPORT_SYMBOL_GPL(iommu_get_resv_regions);

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
@@ -3048,6 +3049,7 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list)
	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}
EXPORT_SYMBOL_GPL(iommu_put_resv_regions);

/**
 * generic_iommu_put_resv_regions - Reserved region driver helper
+14 −0
Original line number Diff line number Diff line
@@ -32,6 +32,20 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
		dev->sriov->stride * vf_id) & 0xff;
}

/*
 * pci_iov_vf_id - get the VF index of a virtual function device
 * @dev: the PCI device, which must be a VF
 *
 * Derives the zero-based VF index from the routing-ID distance between
 * this VF and the PF's first VF, divided by the VF stride (SR-IOV spec
 * sec 3.3.10 and 3.3.11: First VF Offset / VF Stride).
 *
 * Returns the VF index, or -EINVAL if @dev is not a VF.
 */
int pci_iov_vf_id(struct pci_dev *dev)
{
	struct pci_dev *pf;

	if (!dev->is_virtfn)
		return -EINVAL;

	pf = pci_physfn(dev);
	/* Routing ID = (bus << 8) | devfn; index = (VF RID - first VF RID) / stride */
	return (((dev->bus->number << 8) + dev->devfn) -
		((pf->bus->number << 8) + pf->devfn + pf->sriov->offset)) /
	       pf->sriov->stride;
}
EXPORT_SYMBOL_GPL(pci_iov_vf_id);

/*
 * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may
 * change when NumVFs changes.
+341 −42
Original line number Diff line number Diff line
@@ -22,6 +22,7 @@
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/dma-iommu.h>

#include "vhost.h"

@@ -49,6 +50,7 @@ struct vhost_vdpa {
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_iotlb resv_iotlb;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
@@ -65,6 +67,10 @@ static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct
@@ -135,7 +141,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_iotlb_reset(&as->iotlb);
	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
	kfree(as);

	return 0;
@@ -215,6 +221,28 @@ static int vhost_vdpa_reset(struct vhost_vdpa *v)
	return vdpa_reset(vdpa);
}

/*
 * Bind the vhost device's mm to the parent vDPA driver so that a
 * use_va (userspace-virtual-address) device can resolve mappings in
 * that address space. A no-op returning 0 when the device does not
 * use VAs or the parent implements no bind_mm callback.
 */
static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->bind_mm)
		return 0;

	/* NOTE(review): assumes v->vdev.mm is valid here — the ioctl path
	 * invokes this only after VHOST_SET_OWNER succeeds; confirm.
	 */
	return ops->bind_mm(vdpa, v->vdev.mm);
}

/*
 * Undo vhost_vdpa_bind_mm(): detach the previously bound mm from the
 * parent driver. A no-op when the device does not use VAs or the
 * parent implements no unbind_mm callback.
 */
static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->unbind_mm)
		return;

	ops->unbind_mm(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
@@ -355,6 +383,14 @@ static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
	return ops->suspend;
}

/* True iff the parent driver implements the resume() config op, i.e.
 * the VHOST_BACKEND_F_RESUME backend feature may be offered/accepted.
 */
static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->resume;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
@@ -369,11 +405,25 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
	return 0;
}

/*
 * Collect any extra backend feature bits advertised by the parent vDPA
 * driver. Parents without a get_backend_features op contribute none.
 */
static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_backend_features ? ops->get_backend_features(vdpa) : 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_dev *d = &v->vdev;
	u64 actual_features;
	u64 features;
	int i;

	/*
	 * It's not allowed to change the features after they have
@@ -388,6 +438,16 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	/* let the vqs know what has been configured */
	actual_features = ops->get_driver_features(vdpa);
	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->acked_features = actual_features;
		mutex_unlock(&vq->mutex);
	}

	return 0;
}

@@ -494,6 +554,139 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
	return ops->suspend(vdpa);
}

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 *
 * Returns -EOPNOTSUPP if the parent driver has no resume op, otherwise
 * the driver's result.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->resume)
		return -EOPNOTSUPP;

	return ops->resume(vdpa);
}

/*
 * Copy the size of the device's state buffer (as reported by the
 * parent driver) out to userspace.
 *
 * Returns 0 on success, -EOPNOTSUPP if the parent implements no
 * get_dev_buffer_size op, or -EFAULT on a bad user pointer.
 */
static int vhost_vdpa_get_dev_buffer_size(struct vhost_vdpa *v,
					  uint32_t __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	uint32_t size;

	if (!ops->get_dev_buffer_size)
		return -EOPNOTSUPP;

	size = ops->get_dev_buffer_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

/*
 * Read a chunk of device state into a userspace buffer.
 *
 * Only the fixed-size header of @c (up to the flexible 'buf' member,
 * i.e. the off/len fields) is copied into the kernel; c->buf itself is
 * passed through to the parent driver as a user pointer to fill.
 *
 * cf_lock is held for read across the driver call — assumed to
 * serialize device config/state access with writers (NOTE(review):
 * confirm against the vdpa core's cf_lock contract).
 *
 * Returns -EFAULT on a bad user pointer, -EOPNOTSUPP without a
 * get_dev_buffer op, otherwise the driver's result.
 */
static int vhost_vdpa_get_dev_buffer(struct vhost_vdpa *v,
				     struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	int ret;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);

	if (copy_from_user(&config, c, size))
		return -EFAULT;

	if (!ops->get_dev_buffer)
		return -EOPNOTSUPP;

	down_read(&vdpa->cf_lock);
	ret = ops->get_dev_buffer(vdpa, config.off, c->buf, config.len);
	up_read(&vdpa->cf_lock);

	return ret;
}

/*
 * Write a chunk of device state from a userspace buffer.
 *
 * Mirror of vhost_vdpa_get_dev_buffer(): only the off/len header of @c
 * is copied in; c->buf is handed to the parent driver as a user
 * pointer to read the state payload from. cf_lock is held for write
 * across the driver call.
 *
 * Returns -EFAULT on a bad user pointer, -EOPNOTSUPP without a
 * set_dev_buffer op, otherwise the driver's result.
 */
static int vhost_vdpa_set_dev_buffer(struct vhost_vdpa *v,
				     struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	int ret;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);

	if (copy_from_user(&config, c, size))
		return -EFAULT;

	if (!ops->set_dev_buffer)
		return -EOPNOTSUPP;

	down_write(&vdpa->cf_lock);
	ret = ops->set_dev_buffer(vdpa, config.off, c->buf, config.len);
	up_write(&vdpa->cf_lock);

	return ret;
}

/*
 * Forward a migration-state change (a single u8 read from userspace)
 * to the parent driver.
 *
 * Returns -EOPNOTSUPP without a set_mig_state op, -EFAULT on a bad
 * user pointer, otherwise the driver's result.
 */
static int vhost_vdpa_set_mig_state(struct vhost_vdpa *v, u8 __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 state;

	if (!ops->set_mig_state)
		return -EOPNOTSUPP;

	if (get_user(state, c))
		return -EFAULT;

	return ops->set_mig_state(vdpa, state);
}

/*
 * VHOST_SET_LOG_BASE handler for vdpa: copy the dirty-log base (a u64)
 * from userspace and hand it to the parent driver for device-side
 * dirty-page tracking.
 *
 * Returns -EOPNOTSUPP without a set_log_base op, -EFAULT on a bad user
 * pointer, otherwise the driver's result.
 */
static long vhost_vdpa_set_log_base(struct vhost_vdpa *v, u64 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 log;

	if (!ops->set_log_base)
		return -EOPNOTSUPP;

	/* sizeof(log), not sizeof(uint64_t): tracks the variable's type by
	 * construction and matches vhost_vdpa_set_log_size() below.
	 */
	if (copy_from_user(&log, argp, sizeof(log)))
		return -EFAULT;

	return ops->set_log_base(vdpa, log);
}

/*
 * VHOST_SET_LOG_SIZE handler for vdpa: copy the dirty-log size (a u64)
 * from userspace and hand it to the parent driver.
 *
 * Returns -EOPNOTSUPP without a set_log_size op, -EFAULT on a bad user
 * pointer, otherwise the driver's result.
 */
static long vhost_vdpa_set_log_size(struct vhost_vdpa *v, u64 __user *sizep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 log_size;

	if (!ops->set_log_size)
		return -EOPNOTSUPP;

	if (copy_from_user(&log_size, sizep, sizeof(log_size)))
		return -EFAULT;

	return ops->set_log_size(vdpa, log_size);
}

/*
 * VHOST_LOG_SYNC handler for vdpa: ask the parent driver to sync its
 * dirty-page log. Returns -EOPNOTSUPP without a log_sync op, otherwise
 * the driver's result.
 */
static long vhost_vdpa_log_sync(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->log_sync)
		return -EOPNOTSUPP;

	return ops->log_sync(vdpa);
}

static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
@@ -545,7 +738,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
		if (r)
			return r;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = vq_state.packed.last_avail_idx |
					     (vq_state.packed.last_avail_counter << 15);
			vq->last_used_idx = vq_state.packed.last_used_idx |
					    (vq_state.packed.last_used_counter << 15);
		} else {
			vq->last_avail_idx = vq_state.split.avail_index;
		}
		break;
	}

@@ -563,9 +763,15 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
		break;

	case VHOST_SET_VRING_BASE:
		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
		} else {
			vq_state.split.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		}
		r = ops->set_vq_state(vdpa, idx, &vq_state);
		break;

	case VHOST_SET_VRING_CALL:
@@ -602,11 +808,16 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND)))
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
				 BIT_ULL(VHOST_BACKEND_F_BYTEMAPLOG)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		     !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		     !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}
@@ -648,6 +859,14 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
		r = vhost_vdpa_set_log_base(v, argp);
		break;
	case VHOST_SET_LOG_SIZE:
		r = vhost_vdpa_set_log_size(v, argp);
		break;
	case VHOST_LOG_SYNC:
		r = vhost_vdpa_log_sync(v);
		break;
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
@@ -658,6 +877,9 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		features |= vhost_vdpa_get_backend_features(v);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
@@ -673,6 +895,21 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	case VHOST_GET_DEV_BUFFER_SIZE:
		r = vhost_vdpa_get_dev_buffer_size(v, argp);
		break;
	case VHOST_GET_DEV_BUFFER:
		r = vhost_vdpa_get_dev_buffer(v, argp);
		break;
	case VHOST_SET_DEV_BUFFER:
		r = vhost_vdpa_set_dev_buffer(v, argp);
		break;
	case VHOST_VDPA_SET_MIG_STATE:
		r = vhost_vdpa_set_mig_state(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
@@ -680,13 +917,34 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}
/*
 * Undo the device-side mapping for a single iotlb entry, via whichever
 * DMA path the parent driver exposes:
 *  - dma_map/dma_unmap ops: ask the driver to unmap the range
 *    (NOTE(review): presence of dma_map is taken to imply a matching
 *    dma_unmap — the vdpa_config_ops pairing; confirm if in doubt);
 *  - set_map op: nothing to do here, the full table is replayed
 *    through set_map elsewhere (see vhost_vdpa_unmap());
 *  - neither: the range was mapped through the platform IOMMU, so
 *    unmap it from v->domain directly.
 */
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	} else if (ops->set_map == NULL) {
		iommu_unmap(v->domain, map->start, map->size);
	}
}

static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
				struct vhost_iotlb *iotlb,
				u64 start, u64 last)
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
@@ -703,13 +961,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
				struct vhost_iotlb *iotlb,
				u64 start, u64 last)
static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;
@@ -718,20 +976,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb,
				   u64 start, u64 last)
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last);
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}

static int perm_to_iommu_flags(u32 perm)
@@ -798,22 +1057,13 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, iova, size);
	} else if (ops->set_map) {
	if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}

	/* If we are in the middle of batch processing, delay the free
	 * of AS until BATCH_END.
	 */
	if (!v->in_batch && !iotlb->nmaps)
		vhost_vdpa_remove_as(v, asid);
}

static int vhost_vdpa_va_map(struct vhost_vdpa *v,
@@ -1003,6 +1253,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(&v->resv_iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;
@@ -1070,8 +1324,6 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		if (!iotlb->nmaps)
			vhost_vdpa_remove_as(v, asid);
		break;
	default:
		r = -EINVAL;
@@ -1093,6 +1345,46 @@ static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
	return vhost_chr_write_iter(dev, from);
}

/*
 * Record the IOMMU reserved regions of @dma_dev into @resv_iotlb so
 * that later userspace IOTLB updates overlapping a reserved range can
 * be rejected (see vhost_vdpa_process_iotlb_update()).
 *
 * While walking the regions, MSI regions are noted: if the device has
 * no hardware MSI region (IOMMU_RESV_MSI) but does have a software one
 * (IOMMU_RESV_SW_MSI), an MSI cookie is installed on @domain at the
 * SW MSI base so MSI doorbell mappings can be placed there.
 *
 * Returns 0 on success; on failure to record a region the iotlb is
 * reset and the error returned.
 */
static int vhost_vdpa_resv_iommu_region(struct iommu_domain *domain, struct device *dma_dev,
	struct vhost_iotlb *resv_iotlb)
{
	struct list_head dev_resv_regions;
	phys_addr_t resv_msi_base = 0;
	struct iommu_resv_region *region;
	int ret = 0;
	bool with_sw_msi = false;
	bool with_hw_msi = false;

	INIT_LIST_HEAD(&dev_resv_regions);
	iommu_get_resv_regions(dma_dev, &dev_resv_regions);

	list_for_each_entry(region, &dev_resv_regions, list) {
		/* Reserve [start, start+length-1]: perm 0, no opaque data */
		ret = vhost_iotlb_add_range_ctx(resv_iotlb, region->start,
						region->start + region->length - 1,
						0, 0, NULL);
		if (ret) {
			vhost_iotlb_reset(resv_iotlb);
			break;
		}

		if (region->type == IOMMU_RESV_MSI)
			with_hw_msi = true;

		if (region->type == IOMMU_RESV_SW_MSI) {
			resv_msi_base = region->start;
			with_sw_msi = true;
		}

	}

	if (!ret && !with_hw_msi && with_sw_msi)
		ret = iommu_get_msi_cookie(domain, resv_msi_base);

	iommu_put_resv_regions(dma_dev, &dev_resv_regions);

	return ret;
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
@@ -1118,12 +1410,18 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;
		goto err_alloc_domain;

	return 0;
	ret = vhost_vdpa_resv_iommu_region(v->domain, dma_dev, &v->resv_iotlb);
	if (ret)
		goto err_attach_device;

err_attach:
	return 0;
err_attach_device:
	iommu_detach_device(v->domain, dma_dev);
err_alloc_domain:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

@@ -1166,14 +1464,15 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
	struct vhost_vdpa_as *as;
	u32 asid;

	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}

	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
@@ -1189,6 +1488,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;
	r = vhost_vdpa_alloc_domain(v);
	if (r)
		return r;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
@@ -1209,19 +1511,14 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	vhost_vdpa_free_domain(v);
	atomic_dec(&v->opened);
	return r;
}
@@ -1244,7 +1541,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep)
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_unbind_mm(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);
@@ -1379,6 +1676,8 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
		goto err;
	}

	vhost_iotlb_init(&v->resv_iotlb, 0, 0);

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;
+7 −1
Original line number Diff line number Diff line
@@ -2118,7 +2118,7 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar);
#ifdef CONFIG_PCI_IOV
int pci_iov_virtfn_bus(struct pci_dev *dev, int id);
int pci_iov_virtfn_devfn(struct pci_dev *dev, int id);

int pci_iov_vf_id(struct pci_dev *dev);
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);

@@ -2146,6 +2146,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id)
{
	return -ENOSYS;
}

static inline int pci_iov_vf_id(struct pci_dev *dev)
{
	return -ENOSYS;
}

static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }

Loading