Commit 41c03ba9 authored by Linus Torvalds
Browse files
Pull virtio updates from Michael Tsirkin:
 "Mostly fixes all over the place, a couple of cleanups"

* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (32 commits)
  virtio_blk: Fix signedness bug in virtblk_prep_rq()
  vdpa_sim_net: should not drop the multicast/broadcast packet
  vdpasim: fix memory leak when freeing IOTLBs
  vdpa: conditionally fill max max queue pair for stats
  vdpa/vp_vdpa: fix kfree a wrong pointer in vp_vdpa_remove
  vduse: Validate vq_num in vduse_validate_config()
  tools/virtio: remove smp_read_barrier_depends()
  tools/virtio: remove stray characters
  vhost_vdpa: fix the crash in unmap a large memory
  virtio: Implementing attribute show with sysfs_emit
  virtio-crypto: fix memory leak in virtio_crypto_alg_skcipher_close_session()
  tools/virtio: Variable type completion
  vdpa_sim: fix vringh initialization in vdpasim_queue_ready()
  virtio_blk: use UINT_MAX instead of -1U
  vhost-vdpa: fix an iotlb memory leak
  vhost: fix range used in translate_desc()
  vringh: fix range used in iotlb_translate()
  vhost/vsock: Fix error handling in vhost_vsock_init()
  vdpa_sim: fix possible memory leak in vdpasim_net_init() and vdpasim_blk_init()
  tools: Delete the unneeded semicolon after curly braces
  ...
parents 512dee0c a26116c1
Loading
Loading
Loading
Loading
+20 −15
Original line number Diff line number Diff line
@@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
		virtqueue_notify(vq->vq);
}

/*
 * Release the resources held by @req and translate the errno from the
 * queueing path into the corresponding block-layer status code.
 *
 * -ENOSPC maps to BLK_STS_DEV_RESOURCE (device-side resource shortage),
 * -ENOMEM maps to BLK_STS_RESOURCE (host memory pressure, retryable),
 * and anything else is reported as a plain I/O error.
 */
static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
{
	virtblk_cleanup_cmd(req);

	if (rc == -ENOSPC)
		return BLK_STS_DEV_RESOURCE;
	if (rc == -ENOMEM)
		return BLK_STS_RESOURCE;
	return BLK_STS_IOERR;
}

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
					struct virtio_blk *vblk,
					struct request *req,
					struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
	if (unlikely(vbr->sg_table.nents < 0)) {
		virtblk_cleanup_cmd(req);
		return BLK_STS_RESOURCE;
	}
	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		return virtblk_fail_to_queue(req, -ENOMEM);
	vbr->sg_table.nents = num;

	blk_mq_start_request(req);

@@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		virtblk_cleanup_cmd(req);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
		return virtblk_fail_to_queue(req, err);
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev)
	blk_queue_max_segments(q, sg_elems);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);
	blk_queue_max_hw_sectors(q, UINT_MAX);

	max_size = virtio_max_dma_size(vdev);

+2 −1
Original line number Diff line number Diff line
@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			ctrl_status->status, destroy_session->session_id);

		return -EINVAL;
		err = -EINVAL;
		goto out;
	}

	err = 0;
+3 −2
Original line number Diff line number Diff line
@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen);
int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map);
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
			     bool *change_map, unsigned int asid);
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			unsigned int asid);
void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);

#define mlx5_vdpa_warn(__dev, format, ...)                                                         \
+26 −20
Original line number Diff line number Diff line
@@ -311,7 +311,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
	u64 st;
	u64 sz;
	int err;
	int i = 0;

	st = start;
	while (size) {
@@ -336,7 +335,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
		i++;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;
@@ -511,7 +509,8 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
	mutex_unlock(&mr->mkey_mtx);
}

static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err;
@@ -519,6 +518,7 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
	if (mr->initialized)
		return 0;

	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
		if (iotlb)
			err = create_user_mr(mvdev, iotlb);
		else
@@ -526,35 +526,41 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb

		if (err)
			return err;
	}

	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
		err = dup_iotlb(mvdev, iotlb);
		if (err)
			goto out_err;
	}

	mr->initialized = true;
	return 0;

out_err:
	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
		if (iotlb)
			destroy_user_mr(mvdev, mr);
		else
			destroy_dma_mr(mvdev, mr);
	}

	return err;
}

int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			unsigned int asid)
{
	int err;

	mutex_lock(&mvdev->mr.mkey_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb);
	err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	mutex_unlock(&mvdev->mr.mkey_mtx);
	return err;
}

int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			     bool *change_map)
			     bool *change_map, unsigned int asid)
{
	struct mlx5_vdpa_mr *mr = &mvdev->mr;
	int err = 0;
@@ -566,7 +572,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
		*change_map = true;
	}
	if (!*change_map)
		err = _mlx5_vdpa_create_mr(mvdev, iotlb);
		err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	mutex_unlock(&mr->mkey_mtx);

	return err;
+23 −55
Original line number Diff line number Diff line
@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
	eth_broadcast_addr(dmac_c);
	ether_addr_copy(dmac_v, mac);
	if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
	}
	if (tagged) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
	}
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)

		/* Need recreate the flow table entry, so that the packet could forward back
		 */
		mac_vlan_del(ndev, ndev->config.mac, 0, false);
		mac_vlan_del(ndev, mac_back, 0, false);

		if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
			mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
	size_t read;
	u16 id;

	if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
		return status;

	switch (cmd) {
	case VIRTIO_NET_CTRL_VLAN_ADD:
		read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
	}
}

static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
				struct vhost_iotlb *iotlb, unsigned int asid)
{
	struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
	int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb

	teardown_driver(ndev);
	mlx5_vdpa_destroy_mr(mvdev);
	err = mlx5_vdpa_create_mr(mvdev, iotlb);
	err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
	if (err)
		goto err_mr;

@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
	++mvdev->generation;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		if (mlx5_vdpa_create_mr(mvdev, NULL))
		if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
			mlx5_vdpa_warn(mvdev, "create MR failed\n");
	}
	up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
	return mvdev->generation;
}

/*
 * Rebuild the control VQ's private IOTLB from @iotlb.
 *
 * The CVQ shadow iotlb is reset and then repopulated with every mapping
 * found in the supplied tree, all under cvq.iommu_lock so readers never
 * observe a half-copied table.  Returns 0 on success or the error from
 * vhost_iotlb_add_range(); on failure the shadow iotlb may hold only a
 * partial copy (it has already been reset).
 */
static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
{
	/* Full 64-bit range: iterate every mapping in @iotlb. */
	u64 start = 0ULL, last = 0ULL - 1;
	struct vhost_iotlb_map *map;
	int err = 0;

	spin_lock(&mvdev->cvq.iommu_lock);
	vhost_iotlb_reset(mvdev->cvq.iotlb);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
					    map->last, map->addr, map->perm);
		if (err)
			goto out;
	}

out:
	spin_unlock(&mvdev->cvq.iommu_lock);
	return err;
}

static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
			unsigned int asid)
{
	bool change_map;
	int err;

	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
	err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
	if (err) {
		mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
		return err;
	}

	if (change_map)
		err = mlx5_vdpa_change_map(mvdev, iotlb);
		err = mlx5_vdpa_change_map(mvdev, iotlb, asid);

	return err;
}
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
	int err = -EINVAL;

	down_write(&ndev->reslock);
	if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
		err = set_map_data(mvdev, iotlb);
		if (err)
			goto out;
	}

	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
		err = set_map_control(mvdev, iotlb);

out:
	err = set_map_data(mvdev, iotlb, asid);
	up_write(&ndev->reslock);
	return err;
}
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
	int i;

	down_write(&ndev->reslock);
	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
	ndev->nb_registered = false;
	mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
	flush_workqueue(ndev->mvdev.wq);
	for (i = 0; i < ndev->cur_num_vqs; i++) {
		mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
	else
		ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);

	if (ndev->config_cb.callback)
	if (ndev->nb_registered && ndev->config_cb.callback)
		ndev->config_cb.callback(ndev->config_cb.private);

	kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			down_read(&ndev->reslock);
			if (!ndev->nb_registered) {
				up_read(&ndev->reslock);
				return NOTIFY_DONE;
			}
			wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
			if (!wqent) {
				up_read(&ndev->reslock);
			if (!wqent)
				return NOTIFY_DONE;
			}

			wqent->mvdev = &ndev->mvdev;
			INIT_WORK(&wqent->work, update_carrier);
			queue_work(ndev->mvdev.wq, &wqent->work);
			up_read(&ndev->reslock);
			ret = NOTIFY_OK;
			break;
		default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
		goto err_mpfs;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		err = mlx5_vdpa_create_mr(mvdev, NULL);
		err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
		if (err)
			goto err_res;
	}
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
	struct workqueue_struct *wq;

	if (ndev->nb_registered) {
		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
		ndev->nb_registered = false;
		mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
	}
	wq = mvdev->wq;
	mvdev->wq = NULL;
Loading