Commit 8fcd20c3 authored by Eli Cohen, committed by Michael S. Tsirkin

vdpa/mlx5: Support different address spaces for control and data



Partition the virtqueues into two address spaces: one for the control
virtqueue, which is implemented in software, and one for the data
virtqueues.
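
For context, a minimal user-space sketch (not part of this commit) of
how a vhost-vdpa client could use the split: query which group the
control virtqueue belongs to, then bind that group to a separate ASID.
VHOST_VDPA_GET_VRING_GROUP and VHOST_VDPA_SET_GROUP_ASID are the
standard vhost-vdpa uapi ioctls; the helper name, fd and cvq_index are
illustrative assumptions.

#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Illustrative sketch: place the control VQ's group in its own ASID.
 * fd is an open /dev/vhost-vdpa-<n>; cvq_index is the control VQ index.
 */
static int move_cvq_to_asid(int fd, unsigned int cvq_index, unsigned int asid)
{
	struct vhost_vring_state state = { .index = cvq_index };

	/* GET_VRING_GROUP returns the VQ's group in state.num */
	if (ioctl(fd, VHOST_VDPA_GET_VRING_GROUP, &state))
		return -1;

	/* SET_GROUP_ASID takes the group in .index and the ASID in .num */
	state.index = state.num;
	state.num = asid;
	return ioctl(fd, VHOST_VDPA_SET_GROUP_ASID, &state);
}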

Based-on: <20220526124338.36247-1-eperezma@redhat.com>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Message-Id: <20220714113927.85729-3-elic@nvidia.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent cae15c2e
drivers/vdpa/mlx5/core/mlx5_vdpa.h  +11 −0
@@ -70,6 +70,16 @@ struct mlx5_vdpa_wq_ent {
 	struct mlx5_vdpa_dev *mvdev;
 };
 
+enum {
+	MLX5_VDPA_DATAVQ_GROUP,
+	MLX5_VDPA_CVQ_GROUP,
+	MLX5_VDPA_NUMVQ_GROUPS
+};
+
+enum {
+	MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+};
+
 struct mlx5_vdpa_dev {
 	struct vdpa_device vdev;
 	struct mlx5_core_dev *mdev;
@@ -85,6 +95,7 @@ struct mlx5_vdpa_dev {
 	struct mlx5_vdpa_mr mr;
 	struct mlx5_control_vq cvq;
 	struct workqueue_struct *wq;
+	unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
 };
 
 int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
drivers/vdpa/mlx5/net/mlx5_vnet.c  +77 −11
@@ -2127,9 +2127,14 @@ static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev)
 	return PAGE_SIZE;
 }
 
-static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
+static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
 {
-	return 0;
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+	if (is_ctrl_vq_idx(mvdev, idx))
+		return MLX5_VDPA_CVQ_GROUP;
+
+	return MLX5_VDPA_DATAVQ_GROUP;
 }
 
 enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9,
@@ -2543,6 +2548,15 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 	up_write(&ndev->reslock);
 }
 
+static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
+{
+	int i;
+
+	/* default mapping all groups are mapped to asid 0 */
+	for (i = 0; i < MLX5_VDPA_NUMVQ_GROUPS; i++)
+		mvdev->group2asid[i] = 0;
+}
+
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 {
 	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2561,7 +2575,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 	ndev->mvdev.cvq.completed_desc = 0;
 	memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
 	ndev->mvdev.actual_features = 0;
+	init_group_to_asid_map(mvdev);
 	++mvdev->generation;
+
 	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
 		if (mlx5_vdpa_create_mr(mvdev, NULL))
 			mlx5_vdpa_warn(mvdev, "create MR failed\n");
@@ -2599,26 +2615,63 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 	return mvdev->generation;
 }
 
-static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
-			     struct vhost_iotlb *iotlb)
+static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+{
+	u64 start = 0ULL, last = 0ULL - 1;
+	struct vhost_iotlb_map *map;
+	int err = 0;
+
+	spin_lock(&mvdev->cvq.iommu_lock);
+	vhost_iotlb_reset(mvdev->cvq.iotlb);
+
+	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+	     map = vhost_iotlb_itree_next(map, start, last)) {
+		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
+					    map->last, map->addr, map->perm);
+		if (err)
+			goto out;
+	}
+
+out:
+	spin_unlock(&mvdev->cvq.iommu_lock);
+	return err;
+}
+
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
 {
-	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
-	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
 	bool change_map;
 	int err;
 
-	down_write(&ndev->reslock);
-
 	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
 	if (err) {
 		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-		goto err;
+		return err;
 	}
 
 	if (change_map)
 		err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-err:
-	up_write(&ndev->reslock);
 	return err;
 }
+
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
+			     struct vhost_iotlb *iotlb)
+{
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+	int err = -EINVAL;
+
+	down_write(&ndev->reslock);
+	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+		err = set_map_data(mvdev, iotlb);
+		if (err)
+			goto out;
+	}
+
+	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
+		err = set_map_control(mvdev, iotlb);
+
+out:
+	up_write(&ndev->reslock);
+	return err;
+}
@@ -2796,6 +2849,18 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
 	return 0;
 }
 
+static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
+			       unsigned int asid)
+{
+	struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+	if (group >= MLX5_VDPA_NUMVQ_GROUPS)
+		return -EINVAL;
+
+	mvdev->group2asid[group] = asid;
+	return 0;
+}
+
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.set_vq_address = mlx5_vdpa_set_vq_address,
 	.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2825,6 +2890,7 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
 	.set_config = mlx5_vdpa_set_config,
 	.get_generation = mlx5_vdpa_get_generation,
 	.set_map = mlx5_vdpa_set_map,
+	.set_group_asid = mlx5_set_group_asid,
 	.free = mlx5_vdpa_free,
 	.suspend = mlx5_vdpa_suspend,
 };
@@ -3055,7 +3121,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
 	}
 
 	ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
-				 1, 1, name, false);
+				 MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
 	if (IS_ERR(ndev))
 		return PTR_ERR(ndev);