Commit 609be106 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: pass a block_device and opf to bio_alloc_bioset



Pass the block_device and operation that we plan to use this bio for to
bio_alloc_bioset to optimize the assignment.  NULL/0 can be passed, both
for the passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.

Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-16-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 0a3140ea
Loading
Loading
Loading
Loading
+18 −12
Original line number Diff line number Diff line
@@ -417,8 +417,10 @@ static void punt_bios_to_rescuer(struct bio_set *bs)

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @bdev:	block device to allocate the bio for (can be %NULL)
 * @nr_vecs:	number of bvecs to pre-allocate
 * @opf:	operation and flags for bio
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Allocate a bio from the mempools in @bs.
@@ -447,15 +449,16 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	struct bio *bio;
	void *p;

	/* should not use nobvec bioset for nr_iovecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0))
	/* should not use nobvec bioset for nr_vecs > 0 */
	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
		return NULL;

	/*
@@ -492,26 +495,29 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs,
		return NULL;

	bio = p + bs->front_pad;
	if (nr_iovecs > BIO_INLINE_VECS) {
	if (nr_vecs > BIO_INLINE_VECS) {
		struct bio_vec *bvl = NULL;

		bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask);
			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
		}
		if (unlikely(!bvl))
			goto err_free;

		bio_init(bio, bvl, nr_iovecs);
	} else if (nr_iovecs) {
		bio_init(bio, bvl, nr_vecs);
	} else if (nr_vecs) {
		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
	} else {
		bio_init(bio, NULL, 0);
	}

	bio->bi_pool = bs;
	if (bdev)
		bio_set_dev(bio, bdev);
	bio->bi_opf = opf;
	return bio;

err_free:
@@ -767,7 +773,7 @@ struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	b = bio_alloc_bioset(NULL, 0, 0, gfp_mask, bs);
	if (!b)
		return NULL;

@@ -1743,7 +1749,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
	struct bio *bio;

	if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS)
		return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
		return bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);

	cache = per_cpu_ptr(bs->cache, get_cpu());
	if (cache->free_list) {
@@ -1757,7 +1763,7 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
		return bio;
	}
	put_cpu();
	bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
	bio = bio_alloc_bioset(NULL, nr_vecs, 0, GFP_KERNEL, bs);
	bio_set_flag(bio, BIO_PERCPU_CACHE);
	return bio;
}
+2 −4
Original line number Diff line number Diff line
@@ -165,12 +165,10 @@ static struct bio *bounce_clone_bio(struct bio *bio_src)
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */
	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src),
			       &bounce_bio_set);
	bio->bi_bdev		= bio_src->bi_bdev;
	bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
			       bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
+2 −3
Original line number Diff line number Diff line
@@ -138,15 +138,14 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
		op_flags |= REQ_FUA | REQ_PREFLUSH;
	op_flags |= REQ_SYNC;

	bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
	bio_set_dev(bio, bdev->md_bdev);
	bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
			       &drbd_md_io_bio_set);
	bio->bi_iter.bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
		goto out;
	bio->bi_private = device;
	bio->bi_end_io = drbd_md_endio;
	bio_set_op_attrs(bio, op, op_flags);

	if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
+3 −4
Original line number Diff line number Diff line
@@ -976,12 +976,13 @@ static void drbd_bm_endio(struct bio *bio)

static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_hold(local)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &drbd_md_io_bio_set);
	struct drbd_device *device = ctx->device;
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;
	struct bio *bio = bio_alloc_bioset(device->ldev->md_bdev, 1, op,
					   GFP_NOIO, &drbd_md_io_bio_set);
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;
	unsigned int op = (ctx->flags & BM_AIO_READ) ? REQ_OP_READ : REQ_OP_WRITE;

	sector_t on_disk_sector =
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
@@ -1006,14 +1007,12 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio_set_dev(bio, device->ldev->md_bdev);
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
	bio_add_page(bio, page, len, 0);
	bio->bi_private = ctx;
	bio->bi_end_io = drbd_bm_endio;
	bio_set_op_attrs(bio, op, 0);

	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio_io_error(bio);
+5 −7
Original line number Diff line number Diff line
@@ -913,14 +913,13 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
	cache_bio = bio_alloc_bioset(miss->bi_bdev,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
			0, GFP_NOWAIT, &dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= backing_request_endio;
@@ -1025,16 +1024,15 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			flush = bio_alloc_bioset(bio->bi_bdev, 0,
						 REQ_OP_WRITE | REQ_PREFLUSH,
						 GFP_NOIO, &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
Loading