Commit 0c5bcc92 authored by Christoph Hellwig, committed by Jens Axboe

blk-mq: simplify the plug handling in blk_mq_submit_bio



blk_mq_submit_bio has two different plug cases: one that uses full
plugging and one that uses limited plugging.

The limited plugging case is only used for a corner case that does
not matter in real life (the old predicate is sketched after this list):

 - no ->commit_rqs (so not NVMe)
 - no shared tags (so not SCSI)
 - not rotational (so no old disk or floppy driver)
 - must have multiple queues (so no eMMC)
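
For orientation, the four bullets correspond to the full-plugging predicate
that this patch removes (condensed from the blk-mq.c hunk below into a
sketch; not a standalone compilable unit):

	if (plug && (q->nr_hw_queues == 1 ||			/* single hw queue, e.g. eMMC */
	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||	/* shared tags, e.g. SCSI */
	    q->mq_ops->commit_rqs ||				/* ->commit_rqs hook, e.g. NVMe */
	    !blk_queue_nonrot(q))) {				/* rotational disk */
		/* full plugging */
	} else if (rq->rq_flags & RQF_ELV) {
		/* request goes to the I/O scheduler */
	} else if (plug && !blk_queue_nomerges(q)) {
		/* limited plugging: reached only when all four bullets hold */
	}

Only a device matching every bullet (and with merging enabled) could reach
the limited-plugging branch, which is why it does not matter in practice.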

Remove the limited merging case and all the related junk to simplify
blk_mq_submit_bio and the functions called from it.
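
After the patch, the tail of blk_mq_submit_bio collapses to three cases
(condensed from the diff below; the plug branch also flushes a full plug
list, which is elided in this sketch):

	if (plug) {
		blk_add_rq_to_plug(plug, rq);
	} else if ((rq->rq_flags & RQF_ELV) ||
		   (rq->mq_hctx->dispatch_busy &&
		    (q->nr_hw_queues == 1 || !is_sync))) {
		blk_mq_sched_insert_request(rq, false, true, true);
	} else {
		blk_mq_try_issue_directly(rq->mq_hctx, rq);
	}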

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211123160443.1315598-2-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a4561f9f
block/blk-merge.c (+1 −8)
@@ -1067,7 +1067,6 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * @q: request_queue new bio is being queued at
  * @bio: new bio being queued
  * @nr_segs: number of segments in @bio
- * @same_queue_rq: output value, will be true if there's an existing request
- * from the passed in @q already in the plug list
  *
  * Determine whether @bio being queued on @q can be merged with the previous
@@ -1084,7 +1083,7 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
  * Caller must ensure !blk_queue_nomerges(q) beforehand.
  */
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq)
+		unsigned int nr_segs)
 {
 	struct blk_plug *plug;
 	struct request *rq;
@@ -1096,12 +1095,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	/* check the previously added entry for a quick merge attempt */
 	rq = rq_list_peek(&plug->mq_list);
 	if (rq->q == q) {
-		/*
-		 * Only blk-mq multiple hardware queues case checks the rq in
-		 * the same queue, there should be only one such rq in a queue
-		 */
-		*same_queue_rq = true;
-
 		if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
 				BIO_MERGE_OK)
 			return true;
block/blk-mq.c (+13 −55)
@@ -2690,11 +2690,10 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
 }
 
 static bool blk_mq_attempt_bio_merge(struct request_queue *q,
-				     struct bio *bio, unsigned int nr_segs,
-				     bool *same_queue_rq)
+				     struct bio *bio, unsigned int nr_segs)
 {
 	if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
-		if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+		if (blk_attempt_plug_merge(q, bio, nr_segs))
 			return true;
 		if (blk_mq_sched_bio_merge(q, bio, nr_segs))
 			return true;
@@ -2705,8 +2704,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
 static struct request *blk_mq_get_new_requests(struct request_queue *q,
 					       struct blk_plug *plug,
 					       struct bio *bio,
-					       unsigned int nsegs,
-					       bool *same_queue_rq)
+					       unsigned int nsegs)
 {
 	struct blk_mq_alloc_data data = {
 		.q		= q,
@@ -2715,7 +2713,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
 	};
 	struct request *rq;
 
-	if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+	if (blk_mq_attempt_bio_merge(q, bio, nsegs))
 		return NULL;
 
 	rq_qos_throttle(q, bio);
@@ -2751,8 +2749,7 @@ static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
 static inline struct request *blk_mq_get_request(struct request_queue *q,
 						 struct blk_plug *plug,
 						 struct bio *bio,
-						 unsigned int nsegs,
-						 bool *same_queue_rq)
+						 unsigned int nsegs)
 {
 	struct request *rq;
 	bool checked = false;
@@ -2762,8 +2759,7 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 		if (rq && rq->q == q) {
 			if (unlikely(!submit_bio_checks(bio)))
 				return NULL;
-			if (blk_mq_attempt_bio_merge(q, bio, nsegs,
-						same_queue_rq))
+			if (blk_mq_attempt_bio_merge(q, bio, nsegs))
 				return NULL;
 			checked = true;
 			if (!blk_mq_can_use_cached_rq(rq, bio))
@@ -2781,7 +2777,7 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
 		return NULL;
 	if (unlikely(!checked && !submit_bio_checks(bio)))
 		goto out_put;
-	rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
+	rq = blk_mq_get_new_requests(q, plug, bio, nsegs);
 	if (rq)
 		return rq;
 out_put:
@@ -2808,7 +2804,6 @@ void blk_mq_submit_bio(struct bio *bio)
 	const int is_sync = op_is_sync(bio->bi_opf);
 	struct request *rq;
 	struct blk_plug *plug;
-	bool same_queue_rq = false;
 	unsigned int nr_segs = 1;
 	blk_status_t ret;
 
@@ -2823,7 +2818,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 
 	plug = blk_mq_plug(q, bio);
-	rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
+	rq = blk_mq_get_request(q, plug, bio, nr_segs);
 	if (unlikely(!rq))
 		return;
 
@@ -2846,16 +2841,7 @@ void blk_mq_submit_bio(struct bio *bio)
 		return;
 	}
 
-	if (plug && (q->nr_hw_queues == 1 ||
-	    blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
-	    q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
-		/*
-		 * Use plugging if we have a ->commit_rqs() hook as well, as
-		 * we know the driver uses bd->last in a smart fashion.
-		 *
-		 * Use normal plugging if this disk is slow HDD, as sequential
-		 * IO may benefit a lot from plug merging.
-		 */
+	if (plug) {
 		unsigned int request_count = plug->rq_count;
 		struct request *last = NULL;
 
@@ -2873,40 +2859,12 @@ void blk_mq_submit_bio(struct bio *bio)
 		}
 
 		blk_add_rq_to_plug(plug, rq);
-	} else if (rq->rq_flags & RQF_ELV) {
-		/* Insert the request at the IO scheduler queue */
+	} else if ((rq->rq_flags & RQF_ELV) ||
+		   (rq->mq_hctx->dispatch_busy &&
+		    (q->nr_hw_queues == 1 || !is_sync))) {
 		blk_mq_sched_insert_request(rq, false, true, true);
-	} else if (plug && !blk_queue_nomerges(q)) {
-		struct request *next_rq = NULL;
-
-		/*
-		 * We do limited plugging. If the bio can be merged, do that.
-		 * Otherwise the existing request in the plug list will be
-		 * issued. So the plug list will have one request at most
-		 * The plug list might get flushed before this. If that happens,
-		 * the plug list is empty, and same_queue_rq is invalid.
-		 */
-		if (same_queue_rq) {
-			next_rq = rq_list_pop(&plug->mq_list);
-			plug->rq_count--;
-		}
-		blk_add_rq_to_plug(plug, rq);
-		trace_block_plug(q);
-
-		if (next_rq) {
-			trace_block_unplug(q, 1, true);
-			blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
-		}
-	} else if ((q->nr_hw_queues > 1 && is_sync) ||
-		   !rq->mq_hctx->dispatch_busy) {
-		/*
-		 * There is no scheduler and we can try to send directly
-		 * to the hardware.
-		 */
-		blk_mq_try_issue_directly(rq->mq_hctx, rq);
 	} else {
-		/* Default case. */
-		blk_mq_sched_insert_request(rq, false, true, true);
+		blk_mq_try_issue_directly(rq->mq_hctx, rq);
 	}
 }

block/blk.h (+1 −1)
@@ -253,7 +253,7 @@ void blk_add_timer(struct request *req);
 const char *blk_status_to_str(blk_status_t status);
 
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
-		unsigned int nr_segs, bool *same_queue_rq);
+		unsigned int nr_segs);
 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
 			struct bio *bio, unsigned int nr_segs);