Commit 5c17f45e authored by Chengming Zhou's avatar Chengming Zhou Committed by Jens Axboe

blk-mq: fix start_time_ns and alloc_time_ns for pre-allocated rq



iocost relies on rq start_time_ns and alloc_time_ns to tell the saturation
state of the block device. Most of the time the request is allocated after
rq_qos_throttle(), so its alloc_time_ns and start_time_ns are not affected.

But with the plug batched allocation introduced by commit 47c122e3
("block: pre-allocate requests if plug is started and is a batch"),
rq_qos_throttle() can be called after the request has already been
allocated. This is what blk_mq_get_cached_request() does.

In this case, the cached request's alloc_time_ns and start_time_ns lie far
in the past if the task blocked in any qos ->throttle() hook.

Fix it by setting alloc_time_ns and start_time_ns to the current time when
the pre-allocated request is actually used.
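
For illustration only, a minimal userspace C sketch of the ordering problem
and of the lazy-stamping fix; this is not kernel code, and every name in it
(fake_rq, now_ns, the simulated throttle sleep) is invented for the example:

/*
 * Userspace sketch only, not kernel code; all names invented.
 * "Eager" stamping records the time at pre-allocation; a later blocking
 * throttle makes that stamp stale by the time the request is used.
 * "Lazy" stamping at first use, the approach of blk_mq_rq_time_init(),
 * has no such skew.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

struct fake_rq {
	uint64_t start_time_ns;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	struct fake_rq rq;

	/* Eager: stamp at pre-allocation time (the old behaviour). */
	rq.start_time_ns = now_ns();

	/* Simulate blocking for ~100ms in a qos ->throttle() hook. */
	usleep(100 * 1000);

	/* The request is used only now; the eager stamp is ~100ms stale. */
	printf("eager stamp skew: %.1f ms\n",
	       (double)(now_ns() - rq.start_time_ns) / 1e6);

	/* Lazy: stamp at first use (the fixed behaviour); no skew. */
	rq.start_time_ns = now_ns();
	printf("lazy stamp skew:  %.1f ms\n",
	       (double)(now_ns() - rq.start_time_ns) / 1e6);

	return 0;
}

With the stale eager stamp, iocost would see inflated request times and
could wrongly judge the device saturated; stamping at first use avoids that.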

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20230710105516.2053478-1-chengming.zhou@linux.dev


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent f673b4f5
block/blk-mq.c: +30 −17
@@ -328,8 +328,24 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+/* Set start and alloc time when the allocated request is actually used */
+static inline void blk_mq_rq_time_init(struct request *rq, u64 alloc_time_ns)
+{
+	if (blk_mq_need_time_stamp(rq))
+		rq->start_time_ns = ktime_get_ns();
+	else
+		rq->start_time_ns = 0;
+
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+	if (blk_queue_rq_alloc_time(rq->q))
+		rq->alloc_time_ns = alloc_time_ns ?: rq->start_time_ns;
+	else
+		rq->alloc_time_ns = 0;
+#endif
+}
+
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
-		struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
+		struct blk_mq_tags *tags, unsigned int tag)
 {
 	struct blk_mq_ctx *ctx = data->ctx;
 	struct blk_mq_hw_ctx *hctx = data->hctx;
@@ -356,14 +372,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	}
 	rq->timeout = 0;
 
-	if (blk_mq_need_time_stamp(rq))
-		rq->start_time_ns = ktime_get_ns();
-	else
-		rq->start_time_ns = 0;
 	rq->part = NULL;
-#ifdef CONFIG_BLK_RQ_ALLOC_TIME
-	rq->alloc_time_ns = alloc_time_ns;
-#endif
 	rq->io_start_time_ns = 0;
 	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
@@ -393,8 +402,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 }
 
 static inline struct request *
-__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
-		u64 alloc_time_ns)
+__blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
 {
 	unsigned int tag, tag_offset;
 	struct blk_mq_tags *tags;
@@ -413,7 +421,7 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data,
 		tag = tag_offset + i;
 		prefetch(tags->static_rqs[tag]);
 		tag_mask &= ~(1UL << i);
-		rq = blk_mq_rq_ctx_init(data, tags, tag, alloc_time_ns);
+		rq = blk_mq_rq_ctx_init(data, tags, tag);
 		rq_list_add(data->cached_rq, rq);
 		nr++;
 	}
@@ -474,9 +482,11 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 	 * Try batched alloc if we want more than 1 tag.
 	 */
 	if (data->nr_tags > 1) {
-		rq = __blk_mq_alloc_requests_batch(data, alloc_time_ns);
-		if (rq)
+		rq = __blk_mq_alloc_requests_batch(data);
+		if (rq) {
+			blk_mq_rq_time_init(rq, alloc_time_ns);
 			return rq;
+		}
 		data->nr_tags = 1;
 	}
 
@@ -499,8 +509,9 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
 		goto retry;
 	}
 
-	return blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag,
-					alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
+	return rq;
 }
 
 static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
@@ -555,6 +566,7 @@ static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
 			return NULL;
 
 		plug->cached_rq = rq_list_next(rq);
+		blk_mq_rq_time_init(rq, 0);
 	}
 
 	rq->cmd_flags = opf;
@@ -656,8 +668,8 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
 	tag = blk_mq_get_tag(&data);
 	if (tag == BLK_MQ_NO_TAG)
 		goto out_queue_exit;
-	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
-					alloc_time_ns);
+	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
+	blk_mq_rq_time_init(rq, alloc_time_ns);
 	rq->__data_len = 0;
 	rq->__sector = (sector_t) -1;
 	rq->bio = rq->biotail = NULL;
@@ -2896,6 +2908,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 	plug->cached_rq = rq_list_next(rq);
 	rq_qos_throttle(q, *bio);
 
+	blk_mq_rq_time_init(rq, 0);
 	rq->cmd_flags = (*bio)->bi_opf;
 	INIT_LIST_HEAD(&rq->queuelist);
 	return rq;