Commit 78b26a33 authored by Bob Pearson, committed by Leon Romanovsky

RDMA/rxe: Remove tasklet call from rxe_cq.c

Remove the tasklet call in rxe_cq.c and also the is_dying flag in the
cq struct. There is no reason for the rxe driver to defer the call
to the cq completion handler by scheduling a tasklet: rxe_cq_post()
is not called in a hard irq context.

The rxe driver is currently incorrect because the tasklet is scheduled
without holding a reference on the cq, so the underlying memory can be
freed before the deferred routine runs. Executing the comp_handler
inline fixes this problem.
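
As a minimal illustration of the pattern the patch adopts, consider the
user-space C sketch below (hypothetical names throughout; this is not
the rxe code): the completion handler runs synchronously, while the
caller's reference to the object is still live, so there is no window
in which the memory can be freed underneath a deferred callback.

/* Hypothetical user-space sketch; 'struct cq' stands in for an object
 * with a completion callback, like struct rxe_cq's ibcq.comp_handler. */
#include <stdio.h>
#include <stdlib.h>

struct cq {
	void (*comp_handler)(struct cq *cq, void *ctx);
	void *ctx;
};

static void handler(struct cq *cq, void *ctx)
{
	printf("completion on cq %p: %s\n", (void *)cq, (const char *)ctx);
}

/* Safe pattern: run the handler inline, while the caller still holds
 * its reference, so the object cannot be freed underneath it. A
 * deferred call (tasklet, thread, work item) would need to take its
 * own reference on 'cq' to be equally safe. */
static void cq_post(struct cq *cq)
{
	cq->comp_handler(cq, cq->ctx);
}

int main(void)
{
	struct cq *cq = malloc(sizeof(*cq));

	if (!cq)
		return 1;
	cq->comp_handler = handler;
	cq->ctx = "work request done";
	cq_post(cq);	/* handler has finished before the free() below */
	free(cq);
	return 0;
}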

Fixes: 8700e3e7 ("Soft RoCE driver")
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Link: https://lore.kernel.org/r/20230327215643.10410-1-rpearsonhpe@gmail.com
Acked-by: Zhu Yanjun <zyjzyj2000@gmail.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent cba968e3
drivers/infiniband/sw/rxe/rxe_cq.c  +3 −29
@@ -39,21 +39,6 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
 	return -EINVAL;
 }
 
-static void rxe_send_complete(struct tasklet_struct *t)
-{
-	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->cq_lock, flags);
-	if (cq->is_dying) {
-		spin_unlock_irqrestore(&cq->cq_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
-
-	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-}
-
 int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 		     int comp_vector, struct ib_udata *udata,
 		     struct rxe_create_cq_resp __user *uresp)
@@ -79,10 +64,6 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 
 	cq->is_user = uresp;
 
-	cq->is_dying = false;
-
-	tasklet_setup(&cq->comp_task, rxe_send_complete);
-
 	spin_lock_init(&cq->cq_lock);
 	cq->ibcq.cqe = cqe;
 	return 0;
@@ -103,6 +84,7 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
 	return err;
 }
 
+/* caller holds reference to cq */
 int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 {
 	struct ib_event ev;
@@ -136,19 +118,11 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
 	if ((cq->notify == IB_CQ_NEXT_COMP) ||
 	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
 		cq->notify = 0;
-		tasklet_schedule(&cq->comp_task);
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 	}
 
 	return 0;
 }
 
-void rxe_cq_disable(struct rxe_cq *cq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cq->cq_lock, flags);
-	cq->is_dying = true;
-	spin_unlock_irqrestore(&cq->cq_lock, flags);
-}
-
 void rxe_cq_cleanup(struct rxe_pool_elem *elem)
drivers/infiniband/sw/rxe/rxe_verbs.c  +0 −2
@@ -1178,8 +1178,6 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		goto err_out;
 	}
 
-	rxe_cq_disable(cq);
-
 	err = rxe_cleanup(cq);
 	if (err)
 		rxe_err_cq(cq, "cleanup failed, err = %d", err);
drivers/infiniband/sw/rxe/rxe_verbs.h  +0 −2
@@ -63,9 +63,7 @@ struct rxe_cq {
 	struct rxe_queue	*queue;
 	spinlock_t		cq_lock;
 	u8			notify;
-	bool			is_dying;
 	bool			is_user;
-	struct tasklet_struct	comp_task;
 	atomic_t		num_wq;
 };