Commit abd45c15 authored by Jens Axboe
Browse files

block: handle fast path of bio splitting inline



The fast path is no splitting needed. Separate the handling into a
check part we can inline, and an out-of-line handling path if we do
need to split.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 09ce8744
Loading
Loading
Loading
Loading
+6 −18
Original line number Diff line number Diff line
@@ -324,6 +324,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request_queue new bio is being queued at
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
@@ -334,9 +335,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 * of the caller to ensure that q->bio_split is only released after processing
 * of the split bio has finished.
 */
void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		       unsigned int *nr_segs)
{
	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
	struct bio *split = NULL;

	switch (bio_op(*bio)) {
@@ -353,21 +354,6 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
				nr_segs);
		break;
	default:
		/*
		 * All drivers must accept single-segments bios that are <=
		 * PAGE_SIZE.  This is a quick and dirty check that relies on
		 * the fact that bi_io_vec[0] is always valid if a bio has data.
		 * The check might lead to occasional false negatives when bios
		 * are cloned, but compared to the performance impact of cloned
		 * bios themselves the loop below doesn't matter anyway.
		 */
		if (!q->limits.chunk_sectors &&
		    (*bio)->bi_vcnt == 1 &&
		    ((*bio)->bi_io_vec[0].bv_len +
		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
			*nr_segs = 1;
			break;
		}
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}
@@ -397,9 +383,11 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 */
void blk_queue_split(struct bio **bio)
{
	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
	unsigned int nr_segs;

	__blk_queue_split(bio, &nr_segs);
	if (blk_may_split(q, *bio))
		__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

+3 −2
Original line number Diff line number Diff line
@@ -2259,11 +2259,12 @@ void blk_mq_submit_bio(struct bio *bio)
	struct request *rq;
	struct blk_plug *plug;
	struct request *same_queue_rq = NULL;
	unsigned int nr_segs;
	unsigned int nr_segs = 1;
	blk_status_t ret;

	blk_queue_bounce(q, &bio);
	__blk_queue_split(&bio, &nr_segs);
	if (blk_may_split(q, bio))
		__blk_queue_split(q, &bio, &nr_segs);

	if (!bio_integrity_prep(bio))
		goto queue_exit;
+26 −1
Original line number Diff line number Diff line
@@ -266,7 +266,32 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
/*
 * Inline fast-path check: return true when @bio may need to be split, in
 * which case the caller must invoke the out-of-line __blk_queue_split().
 * Returning false lets the common no-split case avoid the function call
 * entirely (see the call sites in blk_queue_split() and blk_mq_submit_bio()).
 */
static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data.  The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

void __blk_queue_split(struct request_queue *q, struct bio **bio,
			unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,