Commit 07888c66 authored by Christoph Hellwig, committed by Jens Axboe
Browse files

block: pass a block_device and opf to bio_alloc



Pass the block_device and operation that we plan to use this bio for to
bio_alloc to optimize the assignment.  NULL/0 can be passed, both for the
passthrough case on a raw request_queue and to temporarily avoid
refactoring some nasty code.

Also move the gfp_mask argument after the nr_vecs argument for a much
more logical calling convention matching what most of the kernel does.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Link: https://lore.kernel.org/r/20220124091107.642561-18-hch@lst.de


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b77c88c2
Loading
Loading
Loading
Loading
+1 −4
Original line number Diff line number Diff line
@@ -347,10 +347,7 @@ EXPORT_SYMBOL(bio_chain);
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	bio_set_dev(new, bdev);
	new->bi_opf = opf;
	struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);

	if (bio) {
		bio_chain(bio, new);
+1 −3
Original line number Diff line number Diff line
@@ -256,9 +256,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
		}
		atomic_inc(&dio->ref);
		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio_set_dev(bio, bdev);
		bio->bi_opf = opf;
		bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);
	}

	blk_finish_plug(&plug);
+4 −6
Original line number Diff line number Diff line
@@ -1279,7 +1279,8 @@ static void one_flush_endio(struct bio *bio)

static void submit_one_flush(struct drbd_device *device, struct issue_flush_context *ctx)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);
	struct bio *bio = bio_alloc(device->ldev->backing_bdev, 0,
				    REQ_OP_FLUSH | REQ_PREFLUSH, GFP_NOIO);
	struct one_flush_context *octx = kmalloc(sizeof(*octx), GFP_NOIO);

	if (!octx) {
@@ -1297,10 +1298,8 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont

	octx->device = device;
	octx->ctx = ctx;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio->bi_private = octx;
	bio->bi_end_io = one_flush_endio;
	bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;

	device->flush_jif = jiffies;
	set_bit(FLUSH_PENDING, &device->flags);
@@ -1685,11 +1684,10 @@ int drbd_submit_peer_request(struct drbd_device *device,
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio = bio_alloc(device->ldev->backing_bdev, nr_pages, op | op_flags,
			GFP_NOIO);
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_iter.bi_sector = sector;
	bio_set_dev(bio, device->ldev->backing_bdev);
	bio_set_op_attrs(bio, op, op_flags);
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

+2 −3
Original line number Diff line number Diff line
@@ -149,7 +149,8 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,
	priv->sess_dev = sess_dev;
	priv->id = id;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio = bio_alloc(sess_dev->rnbd_dev->bdev, 1,
			rnbd_to_bio_flags(le32_to_cpu(msg->rw)), GFP_KERNEL);
	if (bio_add_page(bio, virt_to_page(data), datalen,
			offset_in_page(data)) != datalen) {
		rnbd_srv_err(sess_dev, "Failed to map data to bio\n");
@@ -159,13 +160,11 @@ static int process_rdma(struct rnbd_srv_session *srv_sess,

	bio->bi_end_io = rnbd_dev_bi_end_io;
	bio->bi_private = priv;
	bio->bi_opf = rnbd_to_bio_flags(le32_to_cpu(msg->rw));
	bio->bi_iter.bi_sector = le64_to_cpu(msg->sector);
	bio->bi_iter.bi_size = le32_to_cpu(msg->bi_size);
	prio = srv_sess->ver < RNBD_PROTO_VER_MAJOR ||
	       usrlen < sizeof(*msg) ? 0 : le16_to_cpu(msg->prio);
	bio_set_prio(bio, prio);
	bio_set_dev(bio, sess_dev->rnbd_dev->bdev);

	submit_bio(bio);

+5 −6
Original line number Diff line number Diff line
@@ -1326,13 +1326,13 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {
			bio = bio_alloc(GFP_KERNEL, bio_max_segs(nseg - i));
			bio = bio_alloc(preq.bdev, bio_max_segs(nseg - i),
					operation | operation_flags,
					GFP_KERNEL);
			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
@@ -1342,12 +1342,11 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		bio = bio_alloc(preq.bdev, 0, operation | operation_flags,
				GFP_KERNEL);
		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
Loading