Commit 7b560b89 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Move code to check if drained to subroutine

Move two blocks of code in rxe_comp.c and rxe_req.c into subroutines
that check whether draining is complete in the SQD state and, if so,
generate an SQ_DRAINED event.

Link: https://lore.kernel.org/r/20230405042611.6467-4-rpearsonhpe@gmail.com


Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 98e891b5
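
For context: the SQ_DRAINED event these new subroutines generate is the
asynchronous event a verbs consumer waits for after moving a queue pair to
SQD. The sketch below shows that pattern from userspace with libibverbs; it
is generic verbs usage rather than part of this patch, ctx and qp are
assumed to be an opened device context and an RC QP in RTS, and error
handling is elided:

	#include <infiniband/verbs.h>

	/* Drain the send queue: move the QP to SQD with async
	 * notification enabled, then wait for the provider (e.g. rxe)
	 * to report IBV_EVENT_SQ_DRAINED.
	 */
	static int drain_sq(struct ibv_context *ctx, struct ibv_qp *qp)
	{
		struct ibv_qp_attr attr = {
			.qp_state = IBV_QPS_SQD,
			.en_sqd_async_notify = 1,
		};
		struct ibv_async_event ev;
		int drained;

		if (ibv_modify_qp(qp, &attr,
				  IBV_QP_STATE | IBV_QP_EN_SQD_ASYNC_NOTIFY))
			return -1;

		do {
			if (ibv_get_async_event(ctx, &ev))
				return -1;
			drained = (ev.event_type == IBV_EVENT_SQ_DRAINED &&
				   ev.element.qp == qp);
			ibv_ack_async_event(&ev);
		} while (!drained);

		return 0;
	}

rxe raises this event either from the completer (rxe_comp.c) once the last
PSN is acked, or from the requester (rxe_req.c) once the send queue is found
empty; those are exactly the two checks being factored out in the diff below.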
drivers/infiniband/sw/rxe/rxe_comp.c  (+20 −15)
@@ -477,20 +477,8 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	}
 }
 
-static inline enum comp_state complete_ack(struct rxe_qp *qp,
-					   struct rxe_pkt_info *pkt,
-					   struct rxe_send_wqe *wqe)
+static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-	if (wqe->has_rd_atomic) {
-		wqe->has_rd_atomic = 0;
-		atomic_inc(&qp->req.rd_atomic);
-		if (qp->req.need_rd_atomic) {
-			qp->comp.timeout_retry = 0;
-			qp->req.need_rd_atomic = 0;
-			rxe_sched_task(&qp->req.task);
-		}
-	}
-
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* state_lock used by requester & completer */
 		spin_lock_bh(&qp->state_lock);
@@ -507,11 +495,28 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
 				qp->ibqp.event_handler(&ev,
 					qp->ibqp.qp_context);
 			}
-		} else {
-			spin_unlock_bh(&qp->state_lock);
+			return;
 		}
+		spin_unlock_bh(&qp->state_lock);
 	}
+}
+
+static inline enum comp_state complete_ack(struct rxe_qp *qp,
+					   struct rxe_pkt_info *pkt,
+					   struct rxe_send_wqe *wqe)
+{
+	if (wqe->has_rd_atomic) {
+		wqe->has_rd_atomic = 0;
+		atomic_inc(&qp->req.rd_atomic);
+		if (qp->req.need_rd_atomic) {
+			qp->comp.timeout_retry = 0;
+			qp->req.need_rd_atomic = 0;
+			rxe_sched_task(&qp->req.task);
+		}
+	}
+
+	comp_check_sq_drain_done(qp);
 
 	do_complete(qp, wqe);
 
 	if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
drivers/infiniband/sw/rxe/rxe_req.c  (+18 −14)
@@ -108,17 +108,12 @@ void rnr_nak_timer(struct timer_list *t)
 	rxe_sched_task(&qp->req.task);
 }
 
-static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+static void req_check_sq_drain_done(struct rxe_qp *qp)
 {
-	struct rxe_send_wqe *wqe;
 	struct rxe_queue *q = qp->sq.queue;
 	unsigned int index = qp->req.wqe_index;
-	unsigned int cons;
-	unsigned int prod;
-
-	wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
-	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
-	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
+	unsigned int cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+	struct rxe_send_wqe *wqe = queue_addr_from_index(q, cons);
 
 	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
 		/* check to see if we are drained;
@@ -126,18 +121,14 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 		 */
 		spin_lock_bh(&qp->state_lock);
 		do {
-			if (!qp->attr.sq_draining) {
+			if (!qp->attr.sq_draining)
 				/* comp just finished */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			if (wqe && ((index != cons) ||
-				(wqe->state != wqe_state_posted))) {
+				(wqe->state != wqe_state_posted)))
 				/* comp not done yet */
-				spin_unlock_bh(&qp->state_lock);
 				break;
-			}
 
 			qp->attr.sq_draining = 0;
 			spin_unlock_bh(&qp->state_lock);
@@ -151,9 +142,22 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 				qp->ibqp.event_handler(&ev,
 					qp->ibqp.qp_context);
 			}
+			return;
 		} while (0);
+		spin_unlock_bh(&qp->state_lock);
 	}
+}
+
+static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
+{
+	struct rxe_send_wqe *wqe;
+	struct rxe_queue *q = qp->sq.queue;
+	unsigned int index = qp->req.wqe_index;
+	unsigned int prod;
+
+	req_check_sq_drain_done(qp);
+
+	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
 	if (index == prod)
 		return NULL;
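
On the kernel side, the qp->ibqp.event_handler(&ev, qp->ibqp.qp_context)
call retained in both subroutines dispatches to whatever handler the ULP
registered in struct ib_qp_init_attr when it created the QP. A hypothetical
consumer could wait for the drain as in the sketch below; the handler name
and the completion-based wakeup are illustrative assumptions, not code from
this patch:

	#include <rdma/ib_verbs.h>
	#include <linux/completion.h>

	/* Illustrative ULP event handler, registered via
	 * ib_qp_init_attr.event_handler at QP creation: wake a waiter
	 * once rxe signals that the send queue has drained.
	 */
	static void example_qp_event_handler(struct ib_event *ev,
					     void *qp_context)
	{
		struct completion *drain_done = qp_context;

		if (ev->event == IB_EVENT_SQ_DRAINED)
			complete(drain_done);
	}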