Commit c3aedd22 authored by James Smart, committed by Sagi Grimberg

nvme_fc: cleanup io completion

There was some old code that dealt with complete_rq being called
prior to the lldd returning the io completion. This is garbage code.
The complete_rq routine was being called after eh_timeouts fired, and
only because the eh_timeouts were not being handled properly. The
timeouts were fixed in prior patches so that, in general, a timeout
initiates an abort and the request timer is restarted, as the abort
operation will take care of completing things. With the timer
restarted, the erroneous complete_rq calls are eliminated.

So remove the code that was synchronizing complete_rq with the io
completion.
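
For context, the timeout behaviour the message refers to follows the usual
blk-mq pattern: the .timeout handler kicks off abort/recovery and re-arms the
block-layer timer, leaving the request to be completed later by the abort
path rather than by the timeout itself. A minimal sketch of that pattern is
below; it reuses nvme_fc_error_recovery() as declared in this diff, but the
handler name and its body are illustrative, not the driver's actual timeout
handler.

/*
 * Illustrative sketch only: an io timeout handler that initiates
 * recovery and re-arms the timer. Only nvme_fc_error_recovery() and
 * BLK_EH_RESET_TIMER are taken from the actual driver/block layer;
 * the rest is a hypothetical example.
 */
static enum blk_eh_timer_return
example_fc_io_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	/* start error recovery; outstanding ios will be aborted */
	nvme_fc_error_recovery(op->ctrl, "io timeout");

	/* re-arm the timer; the abort completion finishes the request */
	return BLK_EH_RESET_TIMER;
}

With the timer re-armed this way, complete_rq only ever runs after the lldd
has returned the io completion, which is what allows this patch to drop the
FCOP_FLAGS_RELEASED/FCOP_FLAGS_COMPLETE handshake below.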

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
parent 3efd6e8e
drivers/nvme/host/fc.c: +12 −51
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
 
 enum nvme_fcop_flags {
 	FCOP_FLAGS_TERMIO	= (1 << 0),
-	FCOP_FLAGS_RELEASED	= (1 << 1),
-	FCOP_FLAGS_COMPLETE	= (1 << 2),
-	FCOP_FLAGS_AEN		= (1 << 3),
+	FCOP_FLAGS_AEN		= (1 << 1),
 };
 
 struct nvmefc_ls_req_op {
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
 
 /* *********************** NVME Ctrl Routines **************************** */
 
-static void __nvme_fc_final_op_cleanup(struct request *rq);
 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
 
 static int
@@ -1544,25 +1541,20 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
 		__nvme_fc_abort_op(ctrl, aen_op);
 }
 
-static inline int
+static inline void
 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
 		struct nvme_fc_fcp_op *op, int opstate)
 {
 	unsigned long flags;
-	bool complete_rq = false;
 
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (opstate == FCPOP_STATE_ABORTED && ctrl->flags & FCCTRL_TERMIO) {
-		if (!--ctrl->iocnt)
-			wake_up(&ctrl->ioabort_wait);
+	if (opstate == FCPOP_STATE_ABORTED) {
+		spin_lock_irqsave(&ctrl->lock, flags);
+		if (ctrl->flags & FCCTRL_TERMIO) {
+			if (!--ctrl->iocnt)
+				wake_up(&ctrl->ioabort_wait);
+		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
 	}
-	if (op->flags & FCOP_FLAGS_RELEASED)
-		complete_rq = true;
-	else
-		op->flags |= FCOP_FLAGS_COMPLETE;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	return complete_rq;
 }
 
 static void
@@ -1704,9 +1696,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
 		status |= cpu_to_le16(NVME_SC_DNR << 1);
 
-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate))
-		__nvme_fc_final_op_cleanup(rq);
-	else
-		nvme_end_request(rq, status, result);
+	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+	nvme_end_request(rq, status, result);
 
 check_error:
@@ -2394,45 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
 }
 
 static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
 {
 	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
 	struct nvme_fc_ctrl *ctrl = op->ctrl;
 
 	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_RELEASED | FCOP_FLAGS_COMPLETE);
 
 	nvme_fc_unmap_data(ctrl, rq, op);
 	nvme_complete_rq(rq);
 	nvme_fc_ctrl_put(ctrl);
-
 }
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	unsigned long flags;
-	bool completed = false;
-
-	/*
-	 * the core layer, on controller resets after calling
-	 * nvme_shutdown_ctrl(), calls complete_rq without our
-	 * calling blk_mq_complete_request(), thus there may still
-	 * be live i/o outstanding with the LLDD. Means transport has
-	 * to track complete calls vs fcpio_done calls to know what
-	 * path to take on completes and dones.
-	 */
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (op->flags & FCOP_FLAGS_COMPLETE)
-		completed = true;
-	else
-		op->flags |= FCOP_FLAGS_RELEASED;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	if (completed)
-		__nvme_fc_final_op_cleanup(rq);
-}
 
 /*