Commit 49dc9c1f authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Cleanup reset state handling in rxe_resp.c

Clean up the handling of the qp in the error state, in the reset state,
and during rxe_qp_do_cleanup. The error-state handling does roughly the
same thing as the other two, but its code is spread all over rxe_resp.c.

This patch combines the three paths in a cleaner way.
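
In condensed form, all three cases (invalid qp, reset state, error state)
now funnel through the same pair of drain helpers at the top of
rxe_responder(). A sketch of that consolidated entry path, condensed from
the hunks below with the surrounding code elided:

	if (!qp->valid || qp->resp.state == QP_STATE_ERROR ||
	    qp->resp.state == QP_STATE_RESET) {
		/* recv wqes are completed with IB_WC_WR_FLUSH_ERR only
		 * for a valid qp in the error state; a reset or an
		 * invalid qp just discards them
		 */
		bool notify = qp->valid &&
				(qp->resp.state == QP_STATE_ERROR);

		rxe_drain_req_pkts(qp);		/* free queued request skbs */
		rxe_drain_recv_queue(qp, notify); /* flush or discard recv wqes */
		goto exit;
	}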

Link: https://lore.kernel.org/r/20230304174533.11296-4-rpearsonhpe@gmail.com


Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 3946fc2a
+0 −1
@@ -133,7 +133,6 @@ enum resp_states {
 	RESPST_ERR_LENGTH,
 	RESPST_ERR_CQ_OVERFLOW,
 	RESPST_ERROR,
-	RESPST_RESET,
 	RESPST_DONE,
 	RESPST_EXIT,
 };
+57 −50
@@ -42,7 +42,6 @@ static char *resp_state_name[] = {
 	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
 	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
 	[RESPST_ERROR]				= "ERROR",
-	[RESPST_RESET]				= "RESET",
 	[RESPST_DONE]				= "DONE",
 	[RESPST_EXIT]				= "EXIT",
 };
@@ -69,17 +68,6 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
 {
 	struct sk_buff *skb;
 
-	if (qp->resp.state == QP_STATE_ERROR) {
-		while ((skb = skb_dequeue(&qp->req_pkts))) {
-			rxe_put(qp);
-			kfree_skb(skb);
-			ib_device_put(qp->ibqp.device);
-		}
-
-		/* go drain recv wr queue */
-		return RESPST_CHK_RESOURCE;
-	}
-
 	skb = skb_peek(&qp->req_pkts);
 	if (!skb)
 		return RESPST_EXIT;
@@ -334,24 +322,6 @@ static enum resp_states check_resource(struct rxe_qp *qp,
 {
 	struct rxe_srq *srq = qp->srq;
 
-	if (qp->resp.state == QP_STATE_ERROR) {
-		if (qp->resp.wqe) {
-			qp->resp.status = IB_WC_WR_FLUSH_ERR;
-			return RESPST_COMPLETE;
-		} else if (!srq) {
-			qp->resp.wqe = queue_head(qp->rq.queue,
-					QUEUE_TYPE_FROM_CLIENT);
-			if (qp->resp.wqe) {
-				qp->resp.status = IB_WC_WR_FLUSH_ERR;
-				return RESPST_COMPLETE;
-			} else {
-				return RESPST_EXIT;
-			}
-		} else {
-			return RESPST_EXIT;
-		}
-	}
-
 	if (pkt->mask & (RXE_READ_OR_ATOMIC_MASK | RXE_ATOMIC_WRITE_MASK)) {
 		/* it is the requester's job to not send
 		 * too many read/atomic ops, we just
@@ -1425,24 +1395,68 @@ static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
 	}
 }
 
-static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
+/* drain incoming request packet queue */
+static void rxe_drain_req_pkts(struct rxe_qp *qp)
 {
 	struct sk_buff *skb;
-	struct rxe_queue *q = qp->rq.queue;
 
 	while ((skb = skb_dequeue(&qp->req_pkts))) {
 		rxe_put(qp);
 		kfree_skb(skb);
 		ib_device_put(qp->ibqp.device);
 	}
+}
+
+/* complete receive wqe with flush error */
+static int complete_flush(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
+{
+	struct rxe_cqe cqe = {};
+	struct ib_wc *wc = &cqe.ibwc;
+	struct ib_uverbs_wc *uwc = &cqe.uibwc;
 
-	if (notify)
+	if (qp->rcq->is_user) {
+		uwc->status = IB_WC_WR_FLUSH_ERR;
+		uwc->qp_num = qp_num(qp);
+		uwc->wr_id = wqe->wr_id;
+	} else {
+		wc->status = IB_WC_WR_FLUSH_ERR;
+		wc->qp = &qp->ibqp;
+		wc->wr_id = wqe->wr_id;
+	}
+
+	if (rxe_cq_post(qp->rcq, &cqe, 0))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/* drain and optionally complete the receive queue;
+ * if unable to complete a wqe, stop completing and
+ * just flush the remaining wqes
+ */
+static void rxe_drain_recv_queue(struct rxe_qp *qp, bool notify)
+{
+	struct rxe_queue *q = qp->rq.queue;
+	struct rxe_recv_wqe *wqe;
+	int err;
+
+	if (qp->srq)
 		return;
 
-	while (!qp->srq && q && queue_head(q, q->type))
+	while ((wqe = queue_head(q, q->type))) {
+		if (notify) {
+			err = complete_flush(qp, wqe);
+			if (err) {
+				rxe_dbg_qp(qp, "complete failed for recv wqe");
+				notify = 0;
+			}
+		}
 		queue_advance_consumer(q, q->type);
+	}
+
+	qp->resp.wqe = NULL;
 }
 
 int rxe_responder(struct rxe_qp *qp)
 {
 	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
@@ -1453,20 +1467,18 @@ int rxe_responder(struct rxe_qp *qp)
 	if (!rxe_get(qp))
 		return -EAGAIN;
 
-	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
-
-	if (!qp->valid)
+	if (!qp->valid || qp->resp.state == QP_STATE_ERROR ||
+	    qp->resp.state == QP_STATE_RESET) {
+		bool notify = qp->valid &&
+				(qp->resp.state == QP_STATE_ERROR);
+		rxe_drain_req_pkts(qp);
+		rxe_drain_recv_queue(qp, notify);
 		goto exit;
+	}
 
-	switch (qp->resp.state) {
-	case QP_STATE_RESET:
-		state = RESPST_RESET;
-		break;
+	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 
-	default:
-		state = RESPST_GET_REQ;
-		break;
-	}
+	state = RESPST_GET_REQ;
 
 	while (1) {
 		rxe_dbg_qp(qp, "state = %s\n", resp_state_name[state]);
@@ -1625,11 +1637,6 @@ int rxe_responder(struct rxe_qp *qp)

 			goto exit;
 
-		case RESPST_RESET:
-			rxe_drain_req_pkts(qp, false);
-			qp->resp.wqe = NULL;
-			goto exit;
-
 		case RESPST_ERROR:
 			qp->resp.goto_error = 0;
 			rxe_dbg_qp(qp, "moved to error state\n");