Commit 98e891b5 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Remove qp->req.state

The rxe driver has four different QP state variables:
    qp->attr.qp_state,
    qp->req.state,
    qp->comp.state, and
    qp->resp.state.
All of these basically carry the same information.

This patch replaces uses of qp->req.state and enum rxe_qp_state with
qp->attr.qp_state and enum ib_qp_state. It is the third of three patches
that together remove all but the qp->attr.qp_state variable, bringing the
driver closer to the IBA description.
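
For reference, the mapping used throughout the hunks below is a one-line state
accessor plus the new qp->attr.sq_draining flag. A minimal sketch, assuming
qp_state() is the trivial helper in rxe_verbs.h; the two predicates are
hypothetical helpers written only to illustrate the mapping, not driver code:

/* Sketch only: how the removed qp->req.state values map onto the single
 * remaining state variable.
 */
static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
	return qp->attr.qp_state;
}

/* old: qp->req.state == QP_STATE_DRAIN */
static inline bool rxe_sq_draining(struct rxe_qp *qp)
{
	return qp_state(qp) == IB_QPS_SQD && qp->attr.sq_draining;
}

/* old: qp->req.state == QP_STATE_DRAINED */
static inline bool rxe_sq_drained(struct rxe_qp *qp)
{
	return qp_state(qp) == IB_QPS_SQD && !qp->attr.sq_draining;
}

Because enum ib_qp_state is ordered RESET < INIT < RTR < RTS < SQD < SQE < ERR,
the old QP_STATE_READY checks become ordered comparisons such as
qp_state(qp) >= IB_QPS_RTS.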

Link: https://lore.kernel.org/r/20230405042611.6467-3-rpearsonhpe@gmail.com


Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent f55efc2e
drivers/infiniband/sw/rxe/rxe_comp.c  +4 −5
@@ -491,12 +491,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
		}
	}

-	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
		/* state_lock used by requester & completer */
		spin_lock_bh(&qp->state_lock);
-		if ((qp->req.state == QP_STATE_DRAIN) &&
-		    (qp->comp.psn == qp->req.psn)) {
-			qp->req.state = QP_STATE_DRAINED;
+		if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
+			qp->attr.sq_draining = 0;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
@@ -723,7 +722,7 @@ int rxe_completer(struct rxe_qp *qp)
			 * (4) the timeout parameter is set
			 */
			if ((qp_type(qp) == IB_QPT_RC) &&
-			    (qp->req.state == QP_STATE_READY) &&
+			    (qp_state(qp) >= IB_QPS_RTS) &&
			    (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
			    qp->qp_timeout_jiffies)
				mod_timer(&qp->retrans_timer,
drivers/infiniband/sw/rxe/rxe_net.c  +2 −2
@@ -413,8 +413,8 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
	int is_request = pkt->mask & RXE_REQ_MASK;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

-	if ((is_request && (qp->req.state != QP_STATE_READY)) ||
-	    (!is_request && (qp_state(qp) <= IB_QPS_RTR))) {
+	if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+	    (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
		goto drop;
	}
drivers/infiniband/sw/rxe/rxe_qp.c  +14 −35
@@ -231,7 +231,6 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
	qp->req.wqe_index = queue_get_producer(qp->sq.queue,
					       QUEUE_TYPE_FROM_CLIENT);

-	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

@@ -394,13 +393,10 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		goto err1;
	}

-	if (mask & IB_QP_STATE) {
-		if (cur_state == IB_QPS_SQD) {
-			if (qp->req.state == QP_STATE_DRAIN &&
-			    new_state != IB_QPS_ERR)
+	if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
+		if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
			goto err1;
-		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
@@ -474,9 +470,6 @@ static void rxe_qp_reset(struct rxe_qp *qp)
	rxe_disable_task(&qp->comp.task);
	rxe_disable_task(&qp->req.task);

-	/* move qp to the reset state */
-	qp->req.state = QP_STATE_RESET;
-
	/* drain work and packet queues */
	rxe_requester(qp);
	rxe_completer(qp);
@@ -512,22 +505,9 @@ static void rxe_qp_reset(struct rxe_qp *qp)
	rxe_enable_task(&qp->req.task);
}

-/* drain the send queue */
-static void rxe_qp_drain(struct rxe_qp *qp)
-{
-	if (qp->sq.queue) {
-		if (qp->req.state != QP_STATE_DRAINED) {
-			qp->req.state = QP_STATE_DRAIN;
-			rxe_sched_task(&qp->comp.task);
-			rxe_sched_task(&qp->req.task);
-		}
-	}
-}
-
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
-	qp->req.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
@@ -540,6 +520,8 @@ void rxe_qp_error(struct rxe_qp *qp)
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
+	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+				attr->cur_qp_state : qp->attr.qp_state;
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
@@ -656,7 +638,6 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,

		case IB_QPS_INIT:
			rxe_dbg_qp(qp, "state -> INIT\n");
-			qp->req.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
@@ -665,12 +646,15 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,

		case IB_QPS_RTS:
			rxe_dbg_qp(qp, "state -> RTS\n");
-			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			rxe_dbg_qp(qp, "state -> SQD\n");
-			rxe_qp_drain(qp);
+			if (cur_state != IB_QPS_SQD) {
+				qp->attr.sq_draining = 1;
+				rxe_sched_task(&qp->comp.task);
+				rxe_sched_task(&qp->req.task);
+			}
			break;

		case IB_QPS_SQE:
@@ -708,16 +692,11 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

-	if (qp->req.state == QP_STATE_DRAIN) {
-		attr->sq_draining = 1;
-		/* applications that get this state
-		 * typically spin on it. yield the
-		 * processor
+	/* Applications that get this state typically spin on it.
+	 * Yield the processor
	 */
+	if (qp->attr.sq_draining)
		cond_resched();
-	} else {
-		attr->sq_draining = 0;
-	}

	rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);

drivers/infiniband/sw/rxe/rxe_recv.c  +5 −4
@@ -39,11 +39,12 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
	}

	if (pkt->mask & RXE_REQ_MASK) {
-		if (unlikely(qp_state(qp) <= IB_QPS_RTR))
+		if (unlikely(qp_state(qp) < IB_QPS_RTR))
			return -EINVAL;
-	} else if (unlikely(qp->req.state < QP_STATE_READY ||
-				qp->req.state > QP_STATE_DRAINED))
+	} else {
+		if (unlikely(qp_state(qp) < IB_QPS_RTS))
			return -EINVAL;
+	}

	return 0;
}
drivers/infiniband/sw/rxe/rxe_req.c  +7 −8
@@ -120,13 +120,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
	cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
	prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);

-	if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+	if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
		/* check to see if we are drained;
		 * state_lock used by requester and completer
		 */
		spin_lock_bh(&qp->state_lock);
		do {
-			if (qp->req.state != QP_STATE_DRAIN) {
+			if (!qp->attr.sq_draining) {
				/* comp just finished */
				spin_unlock_bh(&qp->state_lock);
				break;
@@ -139,7 +139,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
				break;
			}

-			qp->req.state = QP_STATE_DRAINED;
+			qp->attr.sq_draining = 0;
			spin_unlock_bh(&qp->state_lock);

			if (qp->ibqp.event_handler) {
@@ -159,8 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)

	wqe = queue_addr_from_index(q, index);

-	if (unlikely((qp->req.state == QP_STATE_DRAIN ||
-		      qp->req.state == QP_STATE_DRAINED) &&
+	if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
		     (wqe->state != wqe_state_processing)))
		return NULL;

@@ -656,7 +655,7 @@ int rxe_requester(struct rxe_qp *qp)
	if (unlikely(!qp->valid))
		goto exit;

-	if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+	if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
		wqe = req_next_wqe(qp);
		if (wqe)
			/*
@@ -667,7 +666,7 @@ int rxe_requester(struct rxe_qp *qp)
			goto exit;
	}

-	if (unlikely(qp->req.state == QP_STATE_RESET)) {
+	if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
		qp->req.wqe_index = queue_get_consumer(q,
						QUEUE_TYPE_FROM_CLIENT);
		qp->req.opcode = -1;
@@ -836,7 +835,7 @@ int rxe_requester(struct rxe_qp *qp)
	/* update wqe_index for each wqe completion */
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
-	qp->req.state = QP_STATE_ERROR;
+	qp->attr.qp_state = IB_QPS_ERR;
	rxe_sched_task(&qp->comp.task);
exit:
	ret = -EAGAIN;