Commit 360f2648 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: defer to the normal submission path for non-flush flush commands



If blk_insert_flush decides that a command does not need to use the
flush state machine, return false and let blk_mq_submit_bio handle
it the normal way (including using an I/O scheduler) instead of doing
a bypass insert.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230519044050.107790-4-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c1075e54
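The change in contract is easiest to see in isolation. Below is a small self-contained user-space sketch of the consumed-or-continue pattern this commit introduces; the names (insert_flush, submit, FSEQ_*) are stand-ins for blk_insert_flush, blk_mq_submit_bio, and the REQ_FSEQ_* flags, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Toy flush-sequence flags; the names mirror the kernel's REQ_FSEQ_*
 * values, but this is a stand-alone model, not kernel code. */
enum { FSEQ_PREFLUSH = 1, FSEQ_DATA = 2, FSEQ_POSTFLUSH = 4 };

struct request {
	unsigned int policy;
};

/* Model of the new contract: return true if the request has been
 * consumed by the flush state machine, false if the caller should
 * keep processing it on the normal submission path. */
static bool insert_flush(struct request *rq)
{
	if (rq->policy == FSEQ_DATA)	/* data, but no flushes needed */
		return false;
	printf("consumed by flush machinery (policy 0x%x)\n", rq->policy);
	return true;
}

static void submit(struct request *rq)
{
	if (insert_flush(rq))
		return;
	/* The normal path: plugging, I/O scheduler, dispatch. */
	printf("normal submission path (policy 0x%x)\n", rq->policy);
}

int main(void)
{
	struct request data_only = { .policy = FSEQ_DATA };
	struct request preflush = { .policy = FSEQ_PREFLUSH | FSEQ_DATA };

	submit(&data_only);	/* now uses the normal path */
	submit(&preflush);	/* still owned by the state machine */
	return 0;
}

The point of returning false instead of doing a bypass insert is that the caller still owns the request, so it can be plugged and merged or go through the I/O scheduler like any other read or write.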
block/blk-flush.c  +8 −14
@@ -385,22 +385,17 @@ static void blk_rq_init_flush(struct request *rq)
 	rq->end_io = mq_flush_data_end_io;
 }
 
-/**
- * blk_insert_flush - insert a new PREFLUSH/FUA request
- * @rq: request to insert
- *
- * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH insertions.
- * or __blk_mq_run_hw_queue() to dispatch request.
- * @rq is being submitted.  Analyze what needs to be done and put it on the
- * right queue.
+/*
+ * Insert a PREFLUSH/FUA request into the flush state machine.
+ * Returns true if the request has been consumed by the flush state machine,
+ * or false if the caller should continue to process it.
  */
-void blk_insert_flush(struct request *rq)
+bool blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
-	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
 	/* FLUSH/FUA request must never be merged */
 	WARN_ON_ONCE(rq->bio != rq->biotail);
@@ -429,16 +424,14 @@ void blk_insert_flush(struct request *rq)
 		 * complete the request.
 		 */
 		blk_mq_end_request(rq, 0);
-		return;
+		return true;
 	case REQ_FSEQ_DATA:
 		/*
 		 * If there's data, but no flush is necessary, the request can
 		 * be processed directly without going through flush machinery.
 		 * Queue for normal execution.
 		 */
-		blk_mq_request_bypass_insert(rq, 0);
-		blk_mq_run_hw_queue(hctx, false);
-		return;
+		return false;
 	default:
 		/*
 		 * Mark the request as part of a flush sequence and submit it
@@ -448,6 +441,7 @@ void blk_insert_flush(struct request *rq)
 		spin_lock_irq(&fq->mq_flush_lock);
 		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
 		spin_unlock_irq(&fq->mq_flush_lock);
+		return true;
 	}
 }
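For context on which requests now take the false branch: the policy feeding the switch above comes from blk_flush_policy(), which this commit does not touch. Roughly, as of this kernel version (paraphrased here for reference, not part of this diff; consult the tree for the authoritative code), it computes:

/* Sketch of blk_flush_policy() for context; not part of this commit. */
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;	/* request carries data */

	if (fflags & (1UL << QUEUE_FLAG_WC)) {	/* volatile write cache */
		if (rq->cmd_flags & REQ_PREFLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
		    (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}

So a policy of exactly REQ_FSEQ_DATA, e.g. a REQ_FUA write on a queue with native FUA support, or any flush-flagged write where the device has no volatile write cache, no longer needs the flush state machine and now flows down the regular submission path.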

block/blk-mq.c  +4 −4
@@ -45,6 +45,8 @@
 static DEFINE_PER_CPU(struct llist_head, blk_cpu_done);
 
 static void blk_mq_insert_request(struct request *rq, blk_insert_t flags);
+static void blk_mq_request_bypass_insert(struct request *rq,
+		blk_insert_t flags);
 static void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 		struct list_head *list);

@@ -2430,7 +2432,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
+static void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags)
 {
 	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
@@ -2977,10 +2979,8 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (op_is_flush(bio->bi_opf)) {
-		blk_insert_flush(rq);
+	if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
 		return;
-	}
 
 	if (plug) {
 		blk_add_rq_to_plug(plug, rq);
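A note on the two blk-mq.c hunks that touch blk_mq_request_bypass_insert: once the function is only used inside blk-mq.c it can become static, but because its definition appears below some of its callers, C requires a forward declaration near the top of the file. A minimal stand-alone illustration of that rule (hypothetical names, nothing kernel-specific):

#include <stdio.h>

/* Forward declaration: lets caller() use helper() even though the
 * definition appears later in this translation unit. */
static void helper(int value);

static void caller(void)
{
	helper(42);	/* OK: the declaration above has been seen */
}

static void helper(int value)
{
	printf("helper(%d)\n", value);
}

int main(void)
{
	caller();
	return 0;
}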
block/blk-mq.h  +0 −4
@@ -64,10 +64,6 @@ struct blk_mq_tags *blk_mq_alloc_map_and_rqs(struct blk_mq_tag_set *set,
 void blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,
 			     struct blk_mq_tags *tags,
 			     unsigned int hctx_idx);
-/*
- * Internal helpers for request insertion into sw queues
- */
-void blk_mq_request_bypass_insert(struct request *rq, blk_insert_t flags);
 
 /*
  * CPU -> queue mappings
block/blk.h  +1 −1
@@ -269,7 +269,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
  */
 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
 
-void blk_insert_flush(struct request *rq);
+bool blk_insert_flush(struct request *rq);
 
 int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
 void elevator_disable(struct request_queue *q);