Commit 514aee66 authored by Leon Romanovsky's avatar Leon Romanovsky Committed by Jason Gunthorpe
Browse files

RDMA: Globally allocate and release QP memory

Convert QP object to follow IB/core general allocation scheme.  That
change allows us to make sure that restrack properly krefs the memory.

Link: https://lore.kernel.org/r/48e767124758aeecc433360ddd85eaa6325b34d9.1627040189.git.leonro@nvidia.com


Reviewed-by: Gal Pressman <galpress@amazon.com> #efa
Tested-by: Gal Pressman <galpress@amazon.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> #rdma and core
Tested-by: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Tested-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 44da3730
Loading
Loading
Loading
Loading
+21 −7
Original line number Diff line number Diff line
@@ -322,13 +322,14 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
	      struct ib_uqp_object *uobj, const char *caller)
{
	struct ib_qp *qp;
	int ret;

	if (!dev->ops.create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = dev->ops.create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;
	qp = rdma_zalloc_drv_obj_numa(dev, ib_qp);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->device = dev;
	qp->pd = pd;
@@ -337,14 +338,10 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,

	qp->qp_type = attr->qp_type;
	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;
	qp->srq = attr->srq;
	qp->rwq_ind_tbl = attr->rwq_ind_tbl;
	qp->event_handler = attr->event_handler;
	qp->port = attr->port_num;

	atomic_set(&qp->usecnt, 0);
	spin_lock_init(&qp->mr_lock);
	INIT_LIST_HEAD(&qp->rdma_mrs);
	INIT_LIST_HEAD(&qp->sig_mrs);
@@ -352,8 +349,25 @@ _ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
	rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
	WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
	rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
	ret = dev->ops.create_qp(qp, attr, udata);
	if (ret)
		goto err_create;

	/*
	 * TODO: The mlx4 internally overwrites send_cq and recv_cq.
	 * Unfortunately, it is not an easy task to fix that driver.
	 */
	qp->send_cq = attr->send_cq;
	qp->recv_cq = attr->recv_cq;

	rdma_restrack_add(&qp->res);
	return qp;

err_create:
	rdma_restrack_put(&qp->res);
	kfree(qp);
	return ERR_PTR(ret);

}

struct rdma_dev_addr;
+2 −0
Original line number Diff line number Diff line
@@ -2654,6 +2654,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_numa_node);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
@@ -2710,6 +2711,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
	SET_OBJ_SIZE(dev_ops, ib_cq);
	SET_OBJ_SIZE(dev_ops, ib_mw);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_qp);
	SET_OBJ_SIZE(dev_ops, ib_rwq_ind_table);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
+1 −1
Original line number Diff line number Diff line
@@ -343,7 +343,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
	rt = &dev->res[res->type];

	old = xa_erase(&rt->xa, res->id);
	if (res->type == RDMA_RESTRACK_MR || res->type == RDMA_RESTRACK_QP)
	if (res->type == RDMA_RESTRACK_MR)
		return;
	WARN_ON(old != res);

+21 −19
Original line number Diff line number Diff line
@@ -1963,9 +1963,13 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
		rdma_rw_cleanup_mrs(qp);

	rdma_counter_unbind_qp(qp, true);
	rdma_restrack_del(&qp->res);
	ret = qp->device->ops.destroy_qp(qp, udata);
	if (!ret) {
	if (ret) {
		if (sec)
			ib_destroy_qp_security_abort(sec);
		return ret;
	}

	if (alt_path_sgid_attr)
		rdma_put_gid_attr(alt_path_sgid_attr);
	if (av_sgid_attr)
@@ -1982,11 +1986,9 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
		atomic_dec(&ind_tbl->usecnt);
	if (sec)
		ib_destroy_qp_security_end(sec);
	} else {
		if (sec)
			ib_destroy_qp_security_abort(sec);
	}

	rdma_restrack_del(&qp->res);
	kfree(qp);
	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp_user);
+8 −18
Original line number Diff line number Diff line
@@ -815,7 +815,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
	if (ib_qp->qp_type == IB_QPT_GSI && rdev->gsi_ctx.gsi_sqp) {
		rc = bnxt_re_destroy_gsi_sqp(qp);
		if (rc)
			goto sh_fail;
			return rc;
	}

	mutex_lock(&rdev->qp_lock);
@@ -826,10 +826,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);

	kfree(qp);
	return 0;
sh_fail:
	return rc;
}

static u8 __from_ib_qp_type(enum ib_qp_type type)
@@ -1402,27 +1399,22 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
	return rc;
}

struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
				struct ib_qp_init_attr *qp_init_attr,
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
		      struct ib_udata *udata)
{
	struct ib_pd *ib_pd = ib_qp->pd;
	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
	struct bnxt_re_dev *rdev = pd->rdev;
	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
	struct bnxt_re_qp *qp;
	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
	int rc;

	rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
	if (!rc) {
		rc = -EINVAL;
		goto exit;
		goto fail;
	}

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		rc = -ENOMEM;
		goto exit;
	}
	qp->rdev = rdev;
	rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
	if (rc)
@@ -1465,16 +1457,14 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
	mutex_unlock(&rdev->qp_lock);
	atomic_inc(&rdev->qp_count);

	return &qp->ib_qp;
	return 0;
qp_destroy:
	bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
free_umem:
	ib_umem_release(qp->rumem);
	ib_umem_release(qp->sumem);
fail:
	kfree(qp);
exit:
	return ERR_PTR(rc);
	return rc;
}

static u8 __from_ib_qp_state(enum ib_qp_state state)
Loading