Commit 94e95d58 authored by Linus Torvalds
Browse files
Pull virtio fixes from Michael Tsirkin:
 "Fixes in virtio, vhost, and vdpa drivers"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost:
  vdpa/mlx5: Fix queue type selection logic
  vdpa/mlx5: Avoid destroying MR on empty iotlb
  tools/virtio: fix build
  virtio_ring: pull in spinlock header
  vringh: pull in spinlock header
  virtio-blk: Add validation for block size in config space
  vringh: Use wiov->used to check for read/write desc order
  virtio_vdpa: reject invalid vq indices
  vdpa: Add documentation for vdpa_alloc_device() macro
  vDPA/ifcvf: Fix return value check for vdpa_alloc_device()
  vp_vdpa: Fix return value check for vdpa_alloc_device()
  vdpa_sim: Fix return value check for vdpa_alloc_device()
  vhost: Fix the calculation in vhost_overflow()
  vhost-vdpa: Fix integer overflow in vhost_vdpa_process_iotlb_update()
  virtio_pci: Support surprise removal of virtio pci device
  virtio: Protect vqs list access
  virtio: Keep vring_del_virtqueue() mirror of VQ create
  virtio: Improve vq->broken access to avoid any compiler optimization
parents 7c60610d 879753c8
Loading
Loading
Loading
Loading
+33 −6
Original line number Diff line number Diff line
@@ -692,6 +692,28 @@ static const struct blk_mq_ops virtio_mq_ops = {
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

/*
 * virtblk_validate - sanity-check device config before feature negotiation.
 * @vdev: the virtio device being probed.
 *
 * Refuses devices whose transport provides no config space access, and
 * clears VIRTIO_BLK_F_BLK_SIZE when the device advertises a block size
 * outside [SECTOR_SIZE, PAGE_SIZE], so the driver falls back to the
 * default block size instead of trusting a bogus value.
 *
 * Return: 0 on success, -EINVAL if config access is unavailable.
 */
static int virtblk_validate(struct virtio_device *vdev)
{
	u32 blk_size;

	/* The config read below needs a working ->get() callback. */
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	/* Without BLK_SIZE the device supplies no size to validate. */
	if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
		return 0;

	blk_size = virtio_cread32(vdev,
			offsetof(struct virtio_blk_config, blk_size));

	/*
	 * Untrusted device gave an out-of-range size: drop the feature
	 * bit rather than fail, so probe continues with the default.
	 */
	if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
		__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);

	return 0;
}

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
@@ -703,12 +725,6 @@ static int virtblk_probe(struct virtio_device *vdev)
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
@@ -823,6 +839,14 @@ static int virtblk_probe(struct virtio_device *vdev)
	else
		blk_size = queue_logical_block_size(q);

	if (unlikely(blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)) {
		dev_err(&vdev->dev,
			"block size is changed unexpectedly, now is %u\n",
			blk_size);
		err = -EINVAL;
		goto err_cleanup_disk;
	}

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
@@ -881,6 +905,8 @@ static int virtblk_probe(struct virtio_device *vdev)
	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

err_cleanup_disk:
	blk_cleanup_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
@@ -983,6 +1009,7 @@ static struct virtio_driver virtio_blk = {
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.validate			= virtblk_validate,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
+2 −2
Original line number Diff line number Diff line
@@ -493,9 +493,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, NULL);
	if (adapter == NULL) {
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return -ENOMEM;
		return PTR_ERR(adapter);
	}

	pci_set_master(pdev);
+0 −9
Original line number Diff line number Diff line
@@ -512,11 +512,6 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
	mutex_unlock(&mr->mkey_mtx);
}

/* Return true if the iotlb interval tree contains no mappings at all. */
static bool map_empty(struct vhost_iotlb *iotlb)
{
	return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX);
}

int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map)
{
@@ -524,10 +519,6 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
	int err = 0;

	*change_map = false;
	if (map_empty(iotlb)) {
		mlx5_vdpa_destroy_mr(mvdev);
		return 0;
	}
	mutex_lock(&mr->mkey_mtx);
	if (mr->initialized) {
		mlx5_vdpa_info(mvdev, "memory map update\n");
+10 −4
Original line number Diff line number Diff line
@@ -752,12 +752,12 @@ static int get_queue_type(struct mlx5_vdpa_net *ndev)
	type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type);

	/* prefer split queue */
	if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED)
		return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
	if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)
		return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;

	WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT));
	WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED));

	return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT;
	return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED;
}

static bool vq_is_tx(u16 idx)
@@ -2029,6 +2029,12 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
		return -ENOSPC;

	mdev = mgtdev->madev->mdev;
	if (!(MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_queue_type) &
	    MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)) {
		dev_warn(mdev->device, "missing support for split virtqueues\n");
		return -EOPNOTSUPP;
	}

	/* we save one virtqueue for control virtqueue should we require it */
	max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
	max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+3 −1
Original line number Diff line number Diff line
@@ -251,8 +251,10 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)

	vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
				    dev_attr->name);
	if (!vdpasim)
	if (IS_ERR(vdpasim)) {
		ret = PTR_ERR(vdpasim);
		goto err_alloc;
	}

	vdpasim->dev_attr = *dev_attr;
	INIT_WORK(&vdpasim->work, dev_attr->work_fn);
Loading