Commit ac16d334 authored by Eric Auger's avatar Eric Auger Committed by Zheng Zengkai
Browse files

vfio: VFIO_IOMMU_SET_MSI_BINDING

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I401IF


CVE: NA

------------------------------

This patch adds the VFIO_IOMMU_SET_MSI_BINDING ioctl, which aims
to register or unregister a guest MSI binding with the host. The
host can then use those stage 1 bindings to build a nested stage
binding targeting the physical MSIs.

Signed-off-by: default avatarEric Auger <eric.auger@redhat.com>
Signed-off-by: default avatarKunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: default avatarKeqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 04ba12c4
Loading
Loading
Loading
Loading
+62 −0
Original line number Diff line number Diff line
@@ -3088,6 +3088,41 @@ static int vfio_cache_inv_fn(struct device *dev, void *data)
	return iommu_uapi_cache_invalidate(dc->domain, dev, (void __user *)arg);
}

/*
 * vfio_bind_msi() - register a guest MSI doorbell binding in every
 * domain of this container.
 *
 * @iommu: the vfio container
 * @giova: MSI guest IOVA (stage 1 doorbell address)
 * @gpa:   MSI guest physical address backing @giova
 * @size:  size of the stage 1 mapping, in bytes
 *
 * Returns 0 on success or the first failing domain's error code.
 * On failure, bindings already installed in earlier domains are
 * rolled back so the container is left unchanged.
 */
static int
vfio_bind_msi(struct vfio_iommu *iommu,
	      dma_addr_t giova, phys_addr_t gpa, size_t size)
{
	struct vfio_domain *d;
	int ret = 0;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_bind_guest_msi(d->domain, giova, gpa, size);
		if (ret)
			break;
	}

	/* Unwind the domains bound before the failing one. */
	if (ret)
		list_for_each_entry_continue_reverse(d, &iommu->domain_list,
						     next)
			iommu_unbind_guest_msi(d->domain, giova);

	mutex_unlock(&iommu->lock);
	return ret;
}

/*
 * vfio_unbind_msi() - remove a previously registered guest MSI
 * binding from every domain of this container.
 *
 * @iommu: the vfio container
 * @giova: MSI guest IOVA identifying the binding to tear down
 */
static void
vfio_unbind_msi(struct vfio_iommu *iommu, dma_addr_t giova)
{
	struct vfio_domain *domain;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		iommu_unbind_guest_msi(domain->domain, giova);
	mutex_unlock(&iommu->lock);
}

static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
					   struct vfio_info_cap *caps)
{
@@ -3285,6 +3320,31 @@ static int vfio_iommu_type1_cache_invalidate(struct vfio_iommu *iommu,
	return ret;
}

static int vfio_iommu_type1_set_msi_binding(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	struct vfio_iommu_type1_set_msi_binding msi_binding;
	unsigned long minsz;

	minsz = offsetofend(struct vfio_iommu_type1_set_msi_binding,
			    size);

	if (copy_from_user(&msi_binding, (void __user *)arg, minsz))
		return -EFAULT;

	if (msi_binding.argsz < minsz)
		return -EINVAL;

	if (msi_binding.flags == VFIO_IOMMU_UNBIND_MSI) {
		vfio_unbind_msi(iommu, msi_binding.iova);
		return 0;
	} else if (msi_binding.flags == VFIO_IOMMU_BIND_MSI) {
		return vfio_bind_msi(iommu, msi_binding.iova,
				     msi_binding.gpa, msi_binding.size);
	}
	return -EINVAL;
}

static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
					unsigned long arg)
{
@@ -3594,6 +3654,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
		return vfio_iommu_type1_set_pasid_table(iommu, arg);
	case VFIO_IOMMU_CACHE_INVALIDATE:
		return vfio_iommu_type1_cache_invalidate(iommu, arg);
	case VFIO_IOMMU_SET_MSI_BINDING:
		return vfio_iommu_type1_set_msi_binding(iommu, arg);
	default:
		return -ENOTTY;
	}
+20 −0
Original line number Diff line number Diff line
@@ -1323,6 +1323,26 @@ struct vfio_iommu_type1_cache_invalidate {
};
#define VFIO_IOMMU_CACHE_INVALIDATE      _IO(VFIO_TYPE, VFIO_BASE + 19)

/**
 * VFIO_IOMMU_SET_MSI_BINDING - _IOWR(VFIO_TYPE, VFIO_BASE + 20,
 *			struct vfio_iommu_type1_set_msi_binding)
 *
 * Pass a stage 1 MSI doorbell mapping to the host so that the
 * host can build a nested stage 2 mapping. Conversely, tear
 * down a previously bound stage 1 MSI binding.
 */
struct vfio_iommu_type1_set_msi_binding {
	__u32   argsz;	/* caller's struct size; must cover through @size */
	__u32   flags;	/* one of BIND/UNBIND below (combinations rejected) */
#define VFIO_IOMMU_BIND_MSI	(1 << 0)
#define VFIO_IOMMU_UNBIND_MSI	(1 << 1)
	__u64	iova;	/* MSI guest IOVA */
	/* Fields below are used on BIND */
	__u64	gpa;	/* MSI guest physical address */
	__u64	size;	/* size of stage1 mapping (bytes) */
};
#define VFIO_IOMMU_SET_MSI_BINDING      _IO(VFIO_TYPE, VFIO_BASE + 20)

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*