Commit 5b13bc8a authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: cleanup request allocation

Refactor the request allocation so that blk_mq_get_cached_request tries
to find a cached request first, and the entirely separate and now
self-contained blk_mq_get_new_requests allocates one or more requests
if that is not possible.
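
Condensed from the diff below, the allocation path in blk_mq_submit_bio
now reads:

	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
	if (!rq) {
		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
		if (unlikely(!rq))
			return;
	}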

There is a small change in behavior: submit_bio_checks is now called
twice if a cached request is present but can't be used. That is a small
price to pay for unwinding this code.
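
As a rough illustration of that path, here is a minimal user-space
model (all names hypothetical, not kernel code) showing the checks
running twice when a cached request exists but is unusable for the bio:

	#include <stdbool.h>
	#include <stdio.h>

	static int checks_run;

	/* Stand-in for submit_bio_checks(); counts how often it runs. */
	static bool submit_checks(void)
	{
		checks_run++;
		return true;
	}

	/* Cached request exists but is unusable, e.g. wrong hctx type. */
	static bool get_cached_request(void)
	{
		if (!submit_checks())	/* first call */
			return false;
		return false;		/* compatibility check fails -> fall back */
	}

	static bool get_new_requests(void)
	{
		if (!submit_checks())	/* second call, on the fallback path */
			return false;
		return true;		/* allocation succeeds */
	}

	int main(void)
	{
		if (!get_cached_request())
			get_new_requests();
		printf("checks ran %d times\n", checks_run);	/* prints 2 */
		return 0;
	}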

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211124062856.1444266-1-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 82d981d4
block/blk-mq.c: +38 −52
@@ -2717,8 +2717,12 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+	if (unlikely(bio_queue_enter(bio)))
 		return NULL;
+	if (unlikely(!submit_bio_checks(bio)))
+		goto queue_exit;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		goto queue_exit;
 
 	rq_qos_throttle(q, bio);
 
@@ -2729,65 +2733,45 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	}
 
 	rq = __blk_mq_alloc_requests(&data);
-	if (rq)
-		return rq;
-
+	if (!rq)
+		goto fail;
+	return rq;
+
+fail:
 	rq_qos_cleanup(q, bio);
 	if (bio->bi_opf & REQ_NOWAIT)
 		bio_wouldblock_error(bio);
-
+queue_exit:
+	blk_queue_exit(q);
 	return NULL;
 }
 
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
-	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
-		return false;
-
-	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
-		return false;
-
-	return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
-						 struct blk_plug *plug,
-						 struct bio *bio,
-						 unsigned int nsegs)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+		struct blk_plug *plug, struct bio *bio, unsigned int nsegs)
 {
 	struct request *rq;
-	bool checked = false;
 
-	if (plug) {
-		rq = rq_list_peek(&plug->cached_rq);
-		if (rq && rq->q == q) {
-			if (unlikely(!submit_bio_checks(bio)))
-				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
-				return NULL;
-			checked = true;
-			if (!blk_mq_can_use_cached_rq(rq, bio))
-				goto fallback;
-			rq->cmd_flags = bio->bi_opf;
-			plug->cached_rq = rq_list_next(rq);
-			INIT_LIST_HEAD(&rq->queuelist);
-			rq_qos_throttle(q, bio);
-			return rq;
-		}
-	}
+	if (!plug)
+		return NULL;
+	rq = rq_list_peek(&plug->cached_rq);
+	if (!rq || rq->q != q)
+		return NULL;
 
-fallback:
-	if (unlikely(bio_queue_enter(bio)))
+	if (unlikely(!submit_bio_checks(bio)))
 		return NULL;
-	if (unlikely(!checked && !submit_bio_checks(bio)))
-		goto out_put;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
-	if (rq)
-		return rq;
-out_put:
-	blk_queue_exit(q);
-	return NULL;
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
+		return NULL;
+	if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
+		return NULL;
+	if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+		return NULL;
+
+	rq->cmd_flags = bio->bi_opf;
+	plug->cached_rq = rq_list_next(rq);
+	INIT_LIST_HEAD(&rq->queuelist);
+	rq_qos_throttle(q, bio);
+	return rq;
 }
 
 /**
  * blk_mq_submit_bio - Create and send a request to block device.
@@ -2805,9 +2789,9 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 void blk_mq_submit_bio(struct bio *bio)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	struct blk_plug *plug = blk_mq_plug(q, bio);
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
-	struct blk_plug *plug;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2821,10 +2805,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	if (!bio_integrity_prep(bio))
 		return;
 
-	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs);
-	if (unlikely(!rq))
-		return;
+	rq = blk_mq_get_cached_request(q, plug, bio, nr_segs);
+	if (!rq) {
+		rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
+		if (unlikely(!rq))
+			return;
+	}
 
 	trace_block_getrq(bio);