Commit d92ca9d8 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: don't handle non-flush requests in blk_insert_flush



Return to the normal blk_mq_submit_bio flow if the bio did not end up
actually being a flush because the device didn't support it.  Note that
this is basically impossible to hit without special instrumentation given
that submit_bio_checks usually already clears these flags, so we'd need a
tight race to actually hit this code path.
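
For context, the check in submit_bio_checks() looks roughly like the sketch below (paraphrased, not copied verbatim): if the device reports no volatile write cache, the flush/FUA bits are stripped from the bio before it ever reaches blk_mq_submit_bio():

	if (op_is_flush(bio->bi_opf) &&
	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
		/* no write-back cache: flush/FUA have nothing to do */
		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
	}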

With this the call to blk_mq_run_hw_queue for the flush requests can be
removed given that the actual flush requests are always issued via the
requeue workqueue which runs the queue unconditionally.
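
For reference, the flush machinery (re)issues its requests roughly as sketched below (paraphrased from block/blk-flush.c of this era): they go onto the requeue list with the kick flag set, and the requeue work then runs the hardware queues on its own:

	static void blk_flush_queue_rq(struct request *rq, bool add_front)
	{
		/* kick_requeue_list == true: the requeue work runs the queues */
		blk_mq_add_to_requeue_list(rq, add_front, true);
	}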

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019122553.2467817-1-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent dc5fc361
block/blk-flush.c  +6 −6
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
  * @rq is being submitted.  Analyze what needs to be done and put it on the
  * right queue.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
@@ -409,7 +409,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if (!policy) {
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	}
 
 	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,10 +420,8 @@ void blk_insert_flush(struct request *rq)
 	 * for normal execution.
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
-	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		blk_mq_request_bypass_insert(rq, false, false);
-		return;
-	}
+	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
+		return false;
 
 	/*
 	 * @rq should go through flush machinery.  Mark it part of flush
@@ -439,6 +437,8 @@ void blk_insert_flush(struct request *rq)
 	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 	spin_unlock_irq(&fq->mq_flush_lock);
+
+	return true;
 }
 
 /**
block/blk-mq.c  +6 −8
@@ -2532,12 +2532,10 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (unlikely(is_flush_fua)) {
-		struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
-		/* Bypass scheduler for flush requests */
-		blk_insert_flush(rq);
-		blk_mq_run_hw_queue(hctx, true);
-	} else if (plug && (q->nr_hw_queues == 1 ||
+	if (is_flush_fua && blk_insert_flush(rq))
+		return;
+
+	if (plug && (q->nr_hw_queues == 1 ||
 	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
 	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
 		/*
block/blk.h  +1 −1
@@ -236,7 +236,7 @@ void __blk_account_io_done(struct request *req, u64 now);
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch_mq(struct request_queue *q,
 			      struct elevator_type *new_e);