Commit dccb23f6 authored by Bob Pearson, committed by Jason Gunthorpe
Browse files

RDMA/rxe: Split rxe_run_task() into two subroutines

Split rxe_run_task(task, sched) into rxe_run_task(task) and
rxe_sched_task(task).

Link: https://lore.kernel.org/r/20221021200118.2163-5-rpearsonhpe@gmail.com


Signed-off-by: Ian Ziemba <ian.ziemba@hpe.com>
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent de669ae8
Loading
Loading
Loading
Loading
+11 −8
Original line number Diff line number Diff line
@@ -118,7 +118,7 @@ void retransmit_timer(struct timer_list *t)

	if (qp->valid) {
		qp->comp.timeout = 1;
		rxe_run_task(&qp->comp.task, 1);
		rxe_sched_task(&qp->comp.task);
	}
}

@@ -132,7 +132,10 @@ void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
	if (must_sched != 0)
		rxe_counter_inc(SKB_TO_PKT(skb)->rxe, RXE_CNT_COMPLETER_SCHED);

	rxe_run_task(&qp->comp.task, must_sched);
	if (must_sched)
		rxe_sched_task(&qp->comp.task);
	else
		rxe_run_task(&qp->comp.task);
}

static inline enum comp_state get_wqe(struct rxe_qp *qp,
@@ -313,7 +316,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
					qp->comp.psn = pkt->psn;
					if (qp->req.wait_psn) {
						qp->req.wait_psn = 0;
						rxe_run_task(&qp->req.task, 0);
						rxe_run_task(&qp->req.task);
					}
				}
				return COMPST_ERROR_RETRY;
@@ -460,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
	 */
	if (qp->req.wait_fence) {
		qp->req.wait_fence = 0;
		rxe_run_task(&qp->req.task, 0);
		rxe_run_task(&qp->req.task);
	}
}

@@ -474,7 +477,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
		if (qp->req.need_rd_atomic) {
			qp->comp.timeout_retry = 0;
			qp->req.need_rd_atomic = 0;
			rxe_run_task(&qp->req.task, 0);
			rxe_run_task(&qp->req.task);
		}
	}

@@ -520,7 +523,7 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,

		if (qp->req.wait_psn) {
			qp->req.wait_psn = 0;
			rxe_run_task(&qp->req.task, 1);
			rxe_sched_task(&qp->req.task);
		}
	}

@@ -654,7 +657,7 @@ int rxe_completer(void *arg)

			if (qp->req.wait_psn) {
				qp->req.wait_psn = 0;
				rxe_run_task(&qp->req.task, 1);
				rxe_sched_task(&qp->req.task);
			}

			state = COMPST_DONE;
@@ -722,7 +725,7 @@ int rxe_completer(void *arg)
							RXE_CNT_COMP_RETRY);
					qp->req.need_retry = 1;
					qp->comp.started_retry = 1;
					rxe_run_task(&qp->req.task, 0);
					rxe_run_task(&qp->req.task);
				}
				goto done;

+2 −2
Original line number Diff line number Diff line
@@ -345,7 +345,7 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb)

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);
		rxe_sched_task(&qp->req.task);

	rxe_put(qp);
}
@@ -429,7 +429,7 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
	if ((qp_type(qp) != IB_QPT_RC) &&
	    (pkt->mask & RXE_END_MASK)) {
		pkt->wqe->state = wqe_state_done;
		rxe_run_task(&qp->comp.task, 1);
		rxe_sched_task(&qp->comp.task);
	}

	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+5 −5
Original line number Diff line number Diff line
@@ -536,10 +536,10 @@ static void rxe_qp_drain(struct rxe_qp *qp)
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
				rxe_sched_task(&qp->comp.task);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
			rxe_sched_task(&qp->req.task);
		}
	}
}
@@ -553,13 +553,13 @@ void rxe_qp_error(struct rxe_qp *qp)
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);
	rxe_sched_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
		rxe_sched_task(&qp->comp.task);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
	rxe_sched_task(&qp->req.task);
}

/* called by the modify qp verb */
+5 −5
Original line number Diff line number Diff line
@@ -105,7 +105,7 @@ void rnr_nak_timer(struct timer_list *t)
	/* request a send queue retry */
	qp->req.need_retry = 1;
	qp->req.wait_for_rnr_timer = 0;
	rxe_run_task(&qp->req.task, 1);
	rxe_sched_task(&qp->req.task);
}

static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
@@ -608,7 +608,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
	 * which can lead to a deadlock. So go ahead and complete
	 * it now.
	 */
	rxe_run_task(&qp->comp.task, 1);
	rxe_sched_task(&qp->comp.task);

	return 0;
}
@@ -733,7 +733,7 @@ int rxe_requester(void *arg)
						       qp->req.wqe_index);
			wqe->state = wqe_state_done;
			wqe->status = IB_WC_SUCCESS;
			rxe_run_task(&qp->comp.task, 0);
			rxe_run_task(&qp->comp.task);
			goto done;
		}
		payload = mtu;
@@ -795,7 +795,7 @@ int rxe_requester(void *arg)
		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

		if (err == -EAGAIN) {
			rxe_run_task(&qp->req.task, 1);
			rxe_sched_task(&qp->req.task);
			goto exit;
		}

@@ -817,7 +817,7 @@ int rxe_requester(void *arg)
	qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
	wqe->state = wqe_state_error;
	qp->req.state = QP_STATE_ERROR;
	rxe_run_task(&qp->comp.task, 0);
	rxe_run_task(&qp->comp.task);
exit:
	ret = -EAGAIN;
out:
+4 −1
Original line number Diff line number Diff line
@@ -91,7 +91,10 @@ void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
	if (must_sched)
		rxe_sched_task(&qp->resp.task);
	else
		rxe_run_task(&qp->resp.task);
}

static inline enum resp_states get_req(struct rxe_qp *qp,
Loading