Commit 3a26babb authored by David S. Miller
Browse files

Merge tag 'mlx5-net-next-5.15-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux



Saeed Mahameed says:

====================
Merge mlx5-next into net-next
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1feef2de 573bce9e
Loading
Loading
Loading
Loading
+26 −0
Original line number Diff line number Diff line
@@ -206,3 +206,29 @@ int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
	kfree(in);
	return err;
}

/*
 * Allocate a UAR (User Access Region) index on @dev by issuing the
 * ALLOC_UAR firmware command, attributed to the DEVX user context @uid
 * (0 for a kernel-owned UAR).
 *
 * On success, stores the firmware-assigned index in *uarn and returns 0.
 * On failure, returns the error from mlx5_cmd_exec_inout() and leaves
 * *uarn untouched.
 */
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	int ret;

	MLX5_SET(alloc_uar_in, in, uid, uid);
	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);

	ret = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (!ret)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return ret;
}

/*
 * Release UAR index @uarn on @dev via the DEALLOC_UAR firmware command,
 * on behalf of the DEVX user context @uid (must match the uid used at
 * allocation time).
 *
 * Returns 0 on success or the error from mlx5_cmd_exec_in().
 */
int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, uid, uid);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);

	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
+2 −0
Original line number Diff line number Diff line
@@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
		     u16 opmod, u8 port);
int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
#endif /* MLX5_IB_CMD_H */
+4 −9
Original line number Diff line number Diff line
@@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	struct mlx5_ib_mkey *mkey = &obj->mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
	mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
	init_waitqueue_head(&mkey->wait);

	return mlx5r_store_odp_mkey(dev, mkey);
@@ -1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
	    xa_erase(&obj->ib_dev->odp_mkeys,
		     mlx5_base_mkey(obj->devx_mr.mmkey.key)))
		     mlx5_base_mkey(obj->mkey.key)))
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
		mlx5r_deref_wait_odp_mkey(&obj->mkey);

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
+1 −1
Original line number Diff line number Diff line
@@ -16,7 +16,7 @@ struct devx_obj {
	u32			dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32			flags;
	union {
		struct mlx5_ib_devx_mr	devx_mr;
		struct mlx5_ib_mkey	mkey;
		struct mlx5_core_dct	core_dct;
		struct mlx5_core_cq	core_cq;
		u32			flow_counter_bulk_size;
+31 −24
Original line number Diff line number Diff line
@@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte

	bfregi = &context->bfregi;
	for (i = 0; i < bfregi->num_static_sys_pages; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
		err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
					 context->devx_uid);
		if (err)
			goto error;

@@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte

error:
	for (--i; i >= 0; i--)
		if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
		if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
					 context->devx_uid))
			mlx5_ib_warn(dev, "failed to free uar %d\n", i);

	return err;
@@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
	for (i = 0; i < bfregi->num_sys_pages; i++)
		if (i < bfregi->num_static_sys_pages ||
		    bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
			mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
			mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
					     context->devx_uid);
}

int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
@@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
	if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
		return -EINVAL;

	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_ctx;
		context->devx_uid = err;
	}

	lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
	lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
	bfregi = &context->bfregi;
@@ -1903,7 +1913,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
	/* updates req->total_num_bfregs */
	err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
	if (err)
		goto out_ctx;
		goto out_devx;

	mutex_init(&bfregi->lock);
	bfregi->lib_uar_4k = lib_uar_4k;
@@ -1911,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
				GFP_KERNEL);
	if (!bfregi->count) {
		err = -ENOMEM;
		goto out_ctx;
		goto out_devx;
	}

	bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
@@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
		goto out_sys_pages;

uar_done:
	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
		err = mlx5_ib_devx_create(dev, true);
		if (err < 0)
			goto out_uars;
		context->devx_uid = err;
	}

	err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
					     context->devx_uid);
	if (err)
		goto out_devx;
		goto out_uars;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);
@@ -1972,9 +1975,6 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,

out_mdev:
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
out_devx:
	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

out_uars:
	deallocate_uars(dev, context);
@@ -1985,6 +1985,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
out_count:
	kfree(bfregi->count);

out_devx:
	if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

out_ctx:
	return err;
}
@@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
	bfregi = &context->bfregi;
	mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);

	if (context->devx_uid)
		mlx5_ib_devx_destroy(dev, context->devx_uid);

	deallocate_uars(dev, context);
	kfree(bfregi->sys_pages);
	kfree(bfregi->count);

	if (context->devx_uid)
		mlx5_ib_devx_destroy(dev, context->devx_uid);
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
	struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
	struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
	struct mlx5_var_table *var_table = &dev->var_table;
	struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);

	switch (mentry->mmap_flag) {
	case MLX5_IB_MMAP_TYPE_MEMIC:
@@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
		break;
	case MLX5_IB_MMAP_TYPE_UAR_WC:
	case MLX5_IB_MMAP_TYPE_UAR_NC:
		mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
		mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
				     context->devx_uid);
		kfree(mentry);
		break;
	default:
@@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		bfregi->count[bfreg_dyn_idx]++;
		mutex_unlock(&bfregi->lock);

		err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
		err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
					 context->devx_uid);
		if (err) {
			mlx5_ib_warn(dev, "UAR alloc failed\n");
			goto free_bfreg;
@@ -2240,7 +2247,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
	if (!dyn_uar)
		return err;

	mlx5_cmd_free_uar(dev->mdev, idx);
	mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);

free_bfreg:
	mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
@@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
		return ERR_PTR(-ENOMEM);

	dev = to_mdev(c->ibucontext.device);
	err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
	err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
	if (err)
		goto end;

@@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
	return entry;

err_insert:
	mlx5_cmd_free_uar(dev->mdev, uar_index);
	mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
	kfree(entry);
	return ERR_PTR(err);
Loading