Commit be4c4278 authored by Bart Van Assche, committed by Jens Axboe
Browse files

blk-mq: use the I/O scheduler for writes from the flush state machine



Send write requests issued by the flush state machine through the normal
I/O submission path including the I/O scheduler (if present) so that I/O
scheduler policies are applied to writes with the FUA flag set.

Separate the I/O scheduler members from the flush members in struct
request since now a request may pass through both an I/O scheduler
and the flush machinery.

Note that the actual flush requests, which have no bio attached to the
request, still bypass the I/O schedulers.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[hch: rebased]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230519044050.107790-5-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 360f2648
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -458,7 +458,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
		 * Flush/passthrough requests are special and go directly to the
		 * dispatch list.
		 */
		if (!op_is_flush(data->cmd_flags) &&
		if ((data->cmd_flags & REQ_OP_MASK) != REQ_OP_FLUSH &&
		    !blk_op_is_passthrough(data->cmd_flags)) {
			struct elevator_mq_ops *ops = &q->elevator->type->ops;

@@ -2497,7 +2497,7 @@ static void blk_mq_insert_request(struct request *rq, blk_insert_t flags)
		 * dispatch it given we prioritize requests in hctx->dispatch.
		 */
		blk_mq_request_bypass_insert(rq, flags);
	} else if (rq->rq_flags & RQF_FLUSH_SEQ) {
	} else if (req_op(rq) == REQ_OP_FLUSH) {
		/*
		 * Firstly normal IO request is inserted to scheduler queue or
		 * sw queue, meantime we add flush request to dispatch queue(
+11 −16
Original line number Diff line number Diff line
@@ -169,14 +169,10 @@ struct request {
		void *completion_data;
	};


	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 * more they have to dynamically allocate it.
	 */
	union {
	struct {
		struct io_cq		*icq;
		void			*priv[2];
@@ -187,7 +183,6 @@ struct request {
		struct list_head	list;
		rq_end_io_fn		*saved_end_io;
	} flush;
	};

	union {
		struct __call_single_data csd;