Commit 0effb390 authored by Bart Van Assche, committed by Jens Axboe

block: mq-deadline: Handle requeued requests correctly

Start dispatching from the start of a zone instead of from the starting
position of the most recently dispatched request.
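
For illustration only, the zone-start arithmetic behind this can be sketched in userspace C. This is a minimal sketch, assuming a power-of-two zone size (which the kernel's round_down() requires); round_down_pow2(), the local sector_t typedef, and the example zone size are invented for the sketch, not part of the patch.

#include <assert.h>

typedef unsigned long long sector_t;

/* Mimics the kernel's round_down() for a power-of-two alignment. */
static sector_t round_down_pow2(sector_t pos, sector_t align)
{
	return pos & ~(align - 1);
}

int main(void)
{
	const sector_t zone_sectors = 524288;	/* assumed zone size (sectors) */

	/* A position inside zone 1 maps back to the start of zone 1. */
	assert(round_down_pow2(zone_sectors + 4660, zone_sectors) == zone_sectors);
	/* A position already at a zone boundary is left unchanged. */
	assert(round_down_pow2(2 * zone_sectors, zone_sectors) == 2 * zone_sectors);
	return 0;
}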

If a zoned write is requeued with an LBA that is lower than that of
already-inserted zoned writes, make sure that it is submitted first.
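
The "submitted first" behavior relies on list_add_tail(new, pos) linking new immediately before pos on the kernel's circular doubly-linked lists, which is how the FIFO insertion in this patch queues a requeued write ahead of a higher-LBA write for the same zone. Below is a minimal userspace sketch of that primitive, using simplified stand-in types rather than the kernel's struct request.

#include <assert.h>

struct list_head { struct list_head *prev, *next; };

static void init_list(struct list_head *h) { h->prev = h->next = h; }

/* Same linking as the kernel's list_add_tail(): insert new before pos. */
static void list_add_tail(struct list_head *new, struct list_head *pos)
{
	new->prev = pos->prev;
	new->next = pos;
	pos->prev->next = new;
	pos->prev = new;
}

int main(void)
{
	struct list_head fifo, rq2, rq;

	init_list(&fifo);
	list_add_tail(&rq2, &fifo);	/* earlier insert: higher LBA */
	list_add_tail(&rq, &rq2);	/* requeued write with a lower LBA */

	/* The requeued write now precedes rq2 in FIFO order. */
	assert(fifo.next == &rq && rq.next == &rq2);
	return 0;
}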

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Damien Le Moal <dlemoal@kernel.org>
Cc: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20230517174230.897144-11-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 83c46ed6
block/mq-deadline.c +32 −2
@@ -156,13 +156,28 @@ deadline_latter_request(struct request *rq)
 	return NULL;
 }
 
-/* Return the first request for which blk_rq_pos() >= pos. */
+/*
+ * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
+ * return the first request after the start of the zone containing @pos.
+ */
 static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
 				enum dd_data_dir data_dir, sector_t pos)
 {
 	struct rb_node *node = per_prio->sort_list[data_dir].rb_node;
 	struct request *rq, *res = NULL;
 
+	if (!node)
+		return NULL;
+
+	rq = rb_entry_rq(node);
+	/*
+	 * A zoned write may have been requeued with a starting position that
+	 * is below that of the most recently dispatched request. Hence, for
+	 * zoned writes, start searching from the start of a zone.
+	 */
+	if (blk_rq_is_seq_zoned_write(rq))
+		pos = round_down(pos, rq->q->limits.chunk_sectors);
+
 	while (node) {
 		rq = rb_entry_rq(node);
 		if (blk_rq_pos(rq) >= pos) {
@@ -806,6 +821,8 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		list_add(&rq->queuelist, &per_prio->dispatch);
 		rq->fifo_time = jiffies;
 	} else {
+		struct list_head *insert_before;
+
 		deadline_add_rq_rb(per_prio, rq);
 
 		if (rq_mergeable(rq)) {
@@ -818,7 +835,20 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 		/*
 		 * set expire time and add to fifo list
 		 */
 		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
+		insert_before = &per_prio->fifo_list[data_dir];
+#ifdef CONFIG_BLK_DEV_ZONED
+		/*
+		 * Insert zoned writes such that requests are sorted by
+		 * position per zone.
+		 */
+		if (blk_rq_is_seq_zoned_write(rq)) {
+			struct request *rq2 = deadline_latter_request(rq);
+
+			if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
+				insert_before = &rq2->queuelist;
+		}
+#endif
+		list_add_tail(&rq->queuelist, insert_before);
 	}
 }
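
For reference, the while loop in deadline_from_pos() above is the standard "first key at or after pos" descent of a binary search tree. The sketch below reproduces that logic on a simplified node type; struct node and first_at_or_after() are illustrative stand-ins for the kernel's rb-tree and request structures, not kernel code.

#include <assert.h>
#include <stddef.h>

typedef unsigned long long sector_t;

struct node {
	sector_t pos;
	struct node *left, *right;
};

/*
 * Mirror of the search loop in deadline_from_pos(): remember the best
 * candidate with pos >= target, then descend toward smaller keys that
 * still qualify.
 */
static struct node *first_at_or_after(struct node *n, sector_t target)
{
	struct node *res = NULL;

	while (n) {
		if (n->pos >= target) {
			res = n;
			n = n->left;
		} else {
			n = n->right;
		}
	}
	return res;
}

int main(void)
{
	struct node a = { 100, NULL, NULL };
	struct node c = { 300, NULL, NULL };
	struct node b = { 200, &a, &c };	/* root of a tiny BST */

	assert(first_at_or_after(&b, 150) == &b);	/* 200 is first >= 150 */
	assert(first_at_or_after(&b, 301) == NULL);	/* nothing >= 301 */
	return 0;
}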