Commit 4b6a5d9c authored by Jens Axboe

block: enable batched allocation for blk_mq_alloc_request()



The filesystem IO path can take advantage of allocating batches of
requests, if the underlying submitter tells the block layer about it
through the blk_plug. For passthrough IO, the exported API is the
blk_mq_alloc_request() helper, and that one does not allow for
request caching.

Wire up request caching for blk_mq_alloc_request(), which is generally
done without having a bio available upfront.
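
As an illustration only (not part of this patch), a passthrough submitter
could opt in to the batched path roughly as sketched below; "q" and
"nr_cmds" are assumed placeholders for the target request_queue and the
expected number of commands:

	struct blk_plug plug;
	struct request *rq;
	int i;

	/* Tell the block layer how many allocations to expect. */
	blk_start_plug_nr_ios(&plug, nr_cmds);

	for (i = 0; i < nr_cmds; i++) {
		/*
		 * The first call fills plug->cached_rq with a batch; later
		 * calls are then served from that cache instead of going
		 * back to the tag allocator each time.
		 */
		rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
		if (IS_ERR(rq))
			break;
		/* ... set up and issue the passthrough command ... */
	}

	blk_finish_plug(&plug);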

Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e73a625b
+71 −9
@@ -510,16 +510,77 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
					alloc_time_ns);
}

static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
					    struct blk_plug *plug,
					    blk_opf_t opf,
					    blk_mq_req_flags_t flags)
{
	struct blk_mq_alloc_data data = {
		.q		= q,
		.flags		= flags,
		.cmd_flags	= opf,
		.nr_tags	= plug->nr_ios,
		.cached_rq	= &plug->cached_rq,
	};
	struct request *rq;

	if (blk_queue_enter(q, flags))
		return NULL;

	plug->nr_ios = 1;

	rq = __blk_mq_alloc_requests(&data);
	if (unlikely(!rq))
		blk_queue_exit(q);
	return rq;
}

static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
						   blk_opf_t opf,
						   blk_mq_req_flags_t flags)
{
	struct blk_plug *plug = current->plug;
	struct request *rq;

	if (!plug)
		return NULL;
	if (rq_list_empty(plug->cached_rq)) {
		if (plug->nr_ios == 1)
			return NULL;
		rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
		if (rq)
			goto got_it;
		return NULL;
	}
	rq = rq_list_peek(&plug->cached_rq);
	if (!rq || rq->q != q)
		return NULL;

	if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
		return NULL;
	if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
		return NULL;

	plug->cached_rq = rq_list_next(rq);
got_it:
	rq->cmd_flags = opf;
	INIT_LIST_HEAD(&rq->queuelist);
	return rq;
}

struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		blk_mq_req_flags_t flags)
{
	struct request *rq;

	rq = blk_mq_alloc_cached_request(q, opf, flags);
	if (!rq) {
		struct blk_mq_alloc_data data = {
			.q		= q,
			.flags		= flags,
			.cmd_flags	= opf,
			.nr_tags	= 1,
		};
		int ret;

		ret = blk_queue_enter(q, flags);
@@ -529,6 +590,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
		rq = __blk_mq_alloc_requests(&data);
		if (!rq)
			goto out_queue_exit;
	}
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;