Commit 5507f67d authored by Leon Romanovsky, committed by Jason Gunthorpe
Browse files

RDMA/core: Properly increment and decrement QP usecnts

The QP usecnts were incremented through the QP attributes structure while
decremented through the QP itself. Rely on the ib_create_qp_user() code that
initializes all QP parameters prior to returning to the user, and increment
the usecnts exactly as destroy decrements them.

Link: https://lore.kernel.org/r/25d256a3bb1fc480b77d7fe439817b993de48610.1628014762.git.leonro@nvidia.com


Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 00a79d6b
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -320,6 +320,8 @@ struct ib_qp *_ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *attr,
			    struct ib_udata *udata, struct ib_uqp_object *uobj,
			    const char *caller);
void ib_qp_usecnt_inc(struct ib_qp *qp);
void ib_qp_usecnt_dec(struct ib_qp *qp);

struct rdma_dev_addr;
int rdma_resolve_ip_route(struct sockaddr *src_addr,
+2 −11
Original line number Diff line number Diff line
@@ -1445,18 +1445,9 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
		ret = PTR_ERR(qp);
		goto err_put;
	}
	ib_qp_usecnt_inc(qp);

	if (cmd->qp_type != IB_QPT_XRC_TGT) {
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (ind_tbl)
			atomic_inc(&ind_tbl->usecnt);
	} else {
	if (cmd->qp_type == IB_QPT_XRC_TGT) {
		/* It is done in _ib_create_qp for other QP types */
		qp->uobject = obj;
	}
+2 −11
Original line number Diff line number Diff line
@@ -258,18 +258,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QP_CREATE)(
		ret = PTR_ERR(qp);
		goto err_put;
	}
	ib_qp_usecnt_inc(qp);

	if (attr.qp_type != IB_QPT_XRC_TGT) {
		atomic_inc(&pd->usecnt);
		if (attr.send_cq)
			atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
		if (attr.rwq_ind_tbl)
			atomic_inc(&attr.rwq_ind_tbl->usecnt);
	} else {
	if (attr.qp_type == IB_QPT_XRC_TGT) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
+33 −27
Original line number Diff line number Diff line
@@ -1274,6 +1274,36 @@ struct ib_qp *_ib_create_qp(struct ib_device *dev, struct ib_pd *pd,
}
EXPORT_SYMBOL(_ib_create_qp);

void ib_qp_usecnt_inc(struct ib_qp *qp)
{
	if (qp->pd)
		atomic_inc(&qp->pd->usecnt);
	if (qp->send_cq)
		atomic_inc(&qp->send_cq->usecnt);
	if (qp->recv_cq)
		atomic_inc(&qp->recv_cq->usecnt);
	if (qp->srq)
		atomic_inc(&qp->srq->usecnt);
	if (qp->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_inc);

void ib_qp_usecnt_dec(struct ib_qp *qp)
{
	if (qp->rwq_ind_tbl)
		atomic_dec(&qp->rwq_ind_tbl->usecnt);
	if (qp->srq)
		atomic_dec(&qp->srq->usecnt);
	if (qp->recv_cq)
		atomic_dec(&qp->recv_cq->usecnt);
	if (qp->send_cq)
		atomic_dec(&qp->send_cq->usecnt);
	if (qp->pd)
		atomic_dec(&qp->pd->usecnt);
}
EXPORT_SYMBOL(ib_qp_usecnt_dec);

struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
				  struct ib_qp_init_attr *qp_init_attr,
				  const char *caller)
@@ -1306,14 +1336,7 @@ struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
		return xrc_qp;
	}

	if (qp_init_attr->recv_cq)
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
	if (qp->srq)
		atomic_inc(&qp_init_attr->srq->usecnt);

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	ib_qp_usecnt_inc(qp);

	if (qp_init_attr->cap.max_rdma_ctxs) {
		ret = rdma_rw_init_mrs(qp, qp_init_attr);
@@ -1971,10 +1994,6 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
{
	const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
	const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	struct ib_qp_security *sec;
	int ret;

@@ -1986,11 +2005,6 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;
	sec  = qp->qp_sec;
	if (sec)
		ib_destroy_qp_security_begin(sec);
@@ -2010,16 +2024,8 @@ int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
		rdma_put_gid_attr(alt_path_sgid_attr);
	if (av_sgid_attr)
		rdma_put_gid_attr(av_sgid_attr);
	if (pd)
		atomic_dec(&pd->usecnt);
	if (scq)
		atomic_dec(&scq->usecnt);
	if (rcq)
		atomic_dec(&rcq->usecnt);
	if (srq)
		atomic_dec(&srq->usecnt);
	if (ind_tbl)
		atomic_dec(&ind_tbl->usecnt);

	ib_qp_usecnt_dec(qp);
	if (sec)
		ib_destroy_qp_security_end(sec);