Commit 066ff571 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: turn bio_kmalloc into a simple kmalloc wrapper



Remove the magic autofree semantics and require the callers to explicitly
call bio_init to initialize the bio.

This allows bio_free to catch accidental bio_put calls on bio_init()ed
bios as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Coly Li <colyli@suse.de>
Acked-by: Mike Snitzer <snitzer@kernel.org>
Link: https://lore.kernel.org/r/20220406061228.410163-5-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 7655db80
Loading
Loading
Loading
Loading
+18 −29
Original line number Diff line number Diff line
@@ -224,24 +224,13 @@ EXPORT_SYMBOL(bio_uninit);
static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;
	void *p = bio;

	bio_uninit(bio);
	WARN_ON_ONCE(!bs);

	if (bs) {
	bio_uninit(bio);
	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, &bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
	mempool_free(p - bs->front_pad, &bs->bio_pool);
}

/*
@@ -568,28 +557,28 @@ struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
EXPORT_SYMBOL(bio_alloc_bioset);

/**
 * bio_kmalloc - kmalloc a bio for I/O
 * bio_kmalloc - kmalloc a bio
 * @nr_vecs:	number of bio_vecs to allocate
 * @gfp_mask:   the GFP_* mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 *
 * Use kmalloc to allocate and initialize a bio.
 * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
 * using bio_init() before use.  To free a bio returned from this function use
 * kfree() after calling bio_uninit().  A bio returned from this function can
 * be reused by calling bio_uninit() before calling bio_init() again.
 *
 * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
 * function are not backed by a mempool and can fail.  Do not use this function
 * for allocations in the file system I/O path.
 *
 * Returns: Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs)
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
	if (nr_vecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask);
	if (unlikely(!bio))
		return NULL;
	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs,
		 0);
	bio->bi_pool = NULL;
	return bio;
	return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
}
EXPORT_SYMBOL(bio_kmalloc);

+8 −6
Original line number Diff line number Diff line
@@ -152,23 +152,25 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)

	src_bio->bi_status = enc_bio->bi_status;

	bio_put(enc_bio);
	bio_uninit(enc_bio);
	kfree(enc_bio);
	bio_endio(src_bio);
}

static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src));
	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio->bi_bdev		= bio_src->bi_bdev;
	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_opf		= bio_src->bi_opf;
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
@@ -363,8 +365,8 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_put(enc_bio);

		bio_uninit(enc_bio);
	kfree(enc_bio);
	return ret;
}

+27 −15
Original line number Diff line number Diff line
@@ -152,10 +152,10 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio->bi_opf |= req_op(rq);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
@@ -224,7 +224,8 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
@@ -234,6 +235,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;
@@ -241,10 +243,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = bio_kmalloc(gfp_mask, iov_iter_npages(iter, BIO_MAX_VECS));
	bio = bio_kmalloc(nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;
	bio->bi_opf |= req_op(rq);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));

	while (iov_iter_count(iter)) {
		struct page **pages;
@@ -303,7 +305,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,

 out_unmap:
	bio_release_pages(bio, false);
	bio_put(bio);
	bio_uninit(bio);
	kfree(bio);
	return ret;
}

@@ -323,7 +326,8 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_put(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
@@ -348,9 +352,10 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
@@ -374,7 +379,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

@@ -390,7 +396,8 @@ static struct bio *bio_map_kern(struct request_queue *q, void *data,
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
@@ -435,9 +442,10 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
@@ -471,7 +479,8 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

@@ -602,7 +611,8 @@ int blk_rq_unmap_user(struct bio *bio)

		next_bio = bio;
		bio = bio->bi_next;
		bio_put(next_bio);
		bio_uninit(next_bio);
		kfree(next_bio);
	}

	return ret;
@@ -648,8 +658,10 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret))
		bio_put(bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
+13 −12
Original line number Diff line number Diff line
@@ -522,9 +522,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
	if (!pkt->w_bio)
		goto no_bio;
	bio_init(pkt->w_bio, NULL, pkt->w_bio->bi_inline_vecs, frames, 0);

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
@@ -536,10 +537,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		struct bio *bio = bio_kmalloc(1, GFP_KERNEL);
		if (!bio)
			goto no_rd_bio;

		bio_init(bio, NULL, bio->bi_inline_vecs, 1, 0);
		pkt->r_bios[i] = bio;
	}

@@ -547,16 +548,16 @@ static struct packet_data *pkt_alloc_packet_data(int frames)

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
		if (pkt->r_bios[i])
			bio_uninit(pkt->r_bios[i]);
		kfree(pkt->r_bios[i]);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	bio_uninit(pkt->w_bio);
	kfree(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
@@ -571,13 +572,13 @@ static void pkt_free_packet_data(struct packet_data *pkt)
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
		bio_uninit(pkt->r_bios[i]);
		kfree(pkt->r_bios[i]);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	bio_uninit(pkt->w_bio);
	kfree(pkt->w_bio);
	kfree(pkt);
}

+6 −4
Original line number Diff line number Diff line
@@ -107,15 +107,16 @@ void bch_btree_verify(struct btree *b)

void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
	unsigned int nr_segs = bio_segments(bio);
	struct bio *check;
	struct bio_vec bv, cbv;
	struct bvec_iter iter, citer = { 0 };

	check = bio_kmalloc(GFP_NOIO, bio_segments(bio));
	check = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!check)
		return;
	bio_set_dev(check, bio->bi_bdev);
	check->bi_opf = REQ_OP_READ;
	bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
		 REQ_OP_READ);
	check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
	check->bi_iter.bi_size = bio->bi_iter.bi_size;

@@ -146,7 +147,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)

	bio_free_pages(check);
out_put:
	bio_put(check);
	bio_uninit(check);
	kfree(check);
}

#endif
Loading