Commit e1f44ac0 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

blk-mq: fold __blk_mq_try_issue_directly into its two callers



Due to the wildly different behavior based on the bypass_insert argument,
not a whole lot of code in __blk_mq_try_issue_directly is actually shared
between blk_mq_try_issue_directly and blk_mq_request_issue_directly.

Remove __blk_mq_try_issue_directly and fold the code into the two callers
instead.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230413064057.707578-14-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2b71b877
Loading
Loading
Loading
Loading
+31 −41
Original line number Diff line number Diff line
@@ -2639,42 +2639,6 @@ static bool blk_mq_get_budget_and_tag(struct request *rq)
	return true;
}

/*
 * Try to dispatch @rq straight to the driver, skipping the I/O scheduler.
 *
 * @bypass_insert distinguishes the two callers: when true (the
 * blk_mq_request_issue_directly() path) a failure to dispatch is reported
 * back as BLK_STS_RESOURCE instead of falling back to inserting the request
 * into the hctx dispatch list.
 * @last is passed through to the driver to indicate whether more requests
 * will follow in this batch.
 *
 * Returns BLK_STS_OK when the request was issued or queued for later
 * dispatch, BLK_STS_RESOURCE when the bypass caller must retry, or the
 * driver's status from __blk_mq_issue_directly().
 */
static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						bool bypass_insert, bool last)
{
	struct request_queue *q = rq->q;
	bool run_queue = true;

	/*
	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
	 * and avoid driver to try to dispatch again.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		/* don't kick the queue; dispatch will resume on unquiesce */
		run_queue = false;
		bypass_insert = false;
		goto insert;
	}

	/* requests with an elevator attached go through the scheduler path */
	if ((rq->rq_flags & RQF_ELV) && !bypass_insert)
		goto insert;

	/* need both a budget and a driver tag before talking to the driver */
	if (!blk_mq_get_budget_and_tag(rq))
		goto insert;

	return __blk_mq_issue_directly(hctx, rq, last);
insert:
	if (bypass_insert)
		return BLK_STS_RESOURCE;

	blk_mq_insert_request(rq, false, run_queue, false);

	return BLK_STS_OK;
}

/**
 * blk_mq_try_issue_directly - Try to send a request directly to device driver.
 * @hctx: Pointer of the associated hardware queue.
@@ -2688,18 +2652,44 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
		struct request *rq)
{
	blk_status_t ret;

	/*
	 * Stopped or quiesced queue: insert without running the queue;
	 * dispatch resumes when the queue is restarted/unquiesced.
	 * (RCU or SRCU read lock must be held before checking quiesced.)
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
		blk_mq_insert_request(rq, false, false, false);
		return;
	}

	/*
	 * Requests with an elevator attached must go through the scheduler,
	 * and without budget + driver tag we cannot issue directly either:
	 * insert and kick the queue.
	 */
	if ((rq->rq_flags & RQF_ELV) || !blk_mq_get_budget_and_tag(rq)) {
		blk_mq_insert_request(rq, false, true, false);
		return;
	}

	ret = __blk_mq_issue_directly(hctx, rq, true);
	switch (ret) {
	case BLK_STS_OK:
		break;
	case BLK_STS_RESOURCE:
	case BLK_STS_DEV_RESOURCE:
		/* driver is busy: queue on the dispatch list and kick it */
		blk_mq_request_bypass_insert(rq, false, true);
		break;
	default:
		/* hard error: complete the request with the error status */
		blk_mq_end_request(rq, ret);
		break;
	}
}

/*
 * Issue @rq directly to the driver on behalf of a caller that handles
 * the failure itself (e.g. dm-mpath request cloning). Unlike
 * blk_mq_try_issue_directly(), a busy condition is reported back as
 * BLK_STS_RESOURCE rather than re-inserted here.
 */
static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/*
	 * Stopped or quiesced queue: park the request and report success so
	 * the caller does not retry; dispatch resumes on unquiesce.
	 */
	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(rq->q)) {
		blk_mq_insert_request(rq, false, false, false);
		return BLK_STS_OK;
	}

	if (!blk_mq_get_budget_and_tag(rq))
		return BLK_STS_RESOURCE;
	return __blk_mq_issue_directly(hctx, rq, last);
}

static void blk_mq_plug_issue_direct(struct blk_plug *plug)