Commit c6699d6f authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: factor out a "classic" poll helper

Factor the code to do the classic full metal polling out of blk_poll into
a separate blk_mq_poll_classic helper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-7-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f70299f0
block/blk-mq.c +56 −64
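Both blk_qc_to_hctx() and the new blk_qc_to_rq() helper in the first hunk below decode a blk_qc_t poll cookie. For orientation, here is a sketch of the cookie layout these helpers assume; it mirrors this generation's include/linux/blk_types.h and is shown for context only, not as part of the patch:

/*
 * Context sketch, mirroring include/linux/blk_types.h of this era;
 * not part of this commit.
 *
 *   bit  31    : BLK_QC_T_INTERNAL - the tag lives in hctx->sched_tags
 *   bits 30..16: hardware queue number (used by blk_qc_to_hctx())
 *   bits 15..0 : tag within that queue (used by blk_qc_t_to_tag())
 */
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1U << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}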
@@ -71,6 +71,14 @@ static inline struct blk_mq_hw_ctx *blk_qc_to_hctx(struct request_queue *q,
 	return q->queue_hw_ctx[(qc & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT];
 }
 
+static inline struct request *blk_qc_to_rq(struct blk_mq_hw_ctx *hctx,
+		blk_qc_t qc)
+{
+	if (blk_qc_t_is_internal(qc))
+		return blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(qc));
+	return blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(qc));
+}
+
 /*
  * Check if any of the ctx, dispatch list or elevator
  * have pending work in this hardware queue.
@@ -3975,15 +3983,20 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
 	return ret;
 }
 
-static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
-				     struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q, blk_qc_t qc)
 {
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, qc);
+	struct request *rq = blk_qc_to_rq(hctx, qc);
 	struct hrtimer_sleeper hs;
 	enum hrtimer_mode mode;
 	unsigned int nsecs;
 	ktime_t kt;
 
-	if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
+	/*
+	 * If a request has completed on queue that uses an I/O scheduler, we
+	 * won't get back a request from blk_qc_to_rq.
+	 */
+	if (!rq || (rq->rq_flags & RQF_MQ_POLL_SLEPT))
 		return false;
 
 	/*
@@ -4025,32 +4038,48 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
 
 	__set_current_state(TASK_RUNNING);
 	destroy_hrtimer_on_stack(&hs.timer);
+
+	/*
+	 * If we sleep, have the caller restart the poll loop to reset the
+	 * state.  Like for the other success return cases, the caller is
+	 * responsible for checking if the IO completed.  If the IO isn't
+	 * complete, we'll get called again and will go straight to the busy
+	 * poll loop.
+	 */
 	return true;
 }
 
-static bool blk_mq_poll_hybrid(struct request_queue *q,
-			       struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
+static int blk_mq_poll_classic(struct request_queue *q, blk_qc_t cookie,
+		bool spin)
 {
-	struct request *rq;
+	struct blk_mq_hw_ctx *hctx = blk_qc_to_hctx(q, cookie);
+	long state = get_current_state();
+	int ret;
 
-	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
-		return false;
+	hctx->poll_considered++;
 
-	if (!blk_qc_t_is_internal(cookie))
-		rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
-	else {
-		rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
-		/*
-		 * With scheduling, if the request has completed, we'll
-		 * get a NULL return here, as we clear the sched tag when
-		 * that happens. The request still remains valid, like always,
-		 * so we should be safe with just the NULL check.
-		 */
-		if (!rq)
-			return false;
-	}
+	do {
+		hctx->poll_invoked++;
+
+		ret = q->mq_ops->poll(hctx);
+		if (ret > 0) {
+			hctx->poll_success++;
+			__set_current_state(TASK_RUNNING);
+			return ret;
+		}
+
+		if (signal_pending_state(state, current))
+			__set_current_state(TASK_RUNNING);
+		if (task_is_running(current))
+			return 1;
+
+		if (ret < 0 || !spin)
+			break;
+		cpu_relax();
+	} while (!need_resched());
 
-	return blk_mq_poll_hybrid_sleep(q, rq);
+	__set_current_state(TASK_RUNNING);
+	return 0;
 }
 
 /**
@@ -4067,9 +4096,6 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
  */
 int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 {
-	struct blk_mq_hw_ctx *hctx;
-	unsigned int state;
-
 	if (!blk_qc_t_valid(cookie) ||
 	    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
 		return 0;
@@ -4077,46 +4103,12 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
-	hctx = blk_qc_to_hctx(q, cookie);
-
-	/*
-	 * If we sleep, have the caller restart the poll loop to reset
-	 * the state. Like for the other success return cases, the
-	 * caller is responsible for checking if the IO completed. If
-	 * the IO isn't complete, we'll get called again and will go
-	 * straight to the busy poll loop. If specified not to spin,
-	 * we also should not sleep.
-	 */
-	if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
-		return 1;
-
-	hctx->poll_considered++;
-
-	state = get_current_state();
-	do {
-		int ret;
-
-		hctx->poll_invoked++;
-
-		ret = q->mq_ops->poll(hctx);
-		if (ret > 0) {
-			hctx->poll_success++;
-			__set_current_state(TASK_RUNNING);
-			return ret;
-		}
-
-		if (signal_pending_state(state, current))
-			__set_current_state(TASK_RUNNING);
-
-		if (task_is_running(current))
-			return 1;
-		if (ret < 0 || !spin)
-			break;
-		cpu_relax();
-	} while (!need_resched());
-
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	/* If specified not to spin, we also should not sleep. */
+	if (spin && q->poll_nsec != BLK_MQ_POLL_CLASSIC) {
+		if (blk_mq_poll_hybrid(q, cookie))
+			return 1;
+	}
+	return blk_mq_poll_classic(q, cookie, spin);
 }
 EXPORT_SYMBOL_GPL(blk_poll);
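
The restart contract spelled out in the comments above ("have the caller restart the poll loop", "the caller is responsible for checking if the IO completed") is easiest to see from the caller's side. Below is a minimal illustrative sketch, not part of this commit, of how a synchronous submitter of this era drives blk_poll(); wait_for_bio_completion() and its done flag are hypothetical names (done is assumed to be set from the bio's ->bi_end_io callback), while the loop shape follows the kernel's own direct I/O wait loop:

#include <linux/blkdev.h>
#include <linux/sched.h>

/* Illustrative only: poll until our own bio has completed. */
static void wait_for_bio_completion(struct request_queue *q,
				    blk_qc_t cookie, bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		/*
		 * blk_poll() returns > 0 when it reaped a completion (or
		 * after the hybrid path slept); 0 means no progress, so
		 * yield the CPU before re-checking "done".
		 */
		if (!blk_poll(q, cookie, true))
			io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}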