Commit 1420c4a5 authored by Bart Van Assche, committed by Jens Axboe
Browse files

fs/buffer: Combine two submit_bh() and ll_rw_block() arguments



Both submit_bh() and ll_rw_block() accept a request operation type and
request flags as their first two arguments. Micro-optimize these two
functions by combining these first two arguments into a single argument.
This patch does not change the behavior of any of the modified code.

Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Song Liu <song@kernel.org> (for the md changes)
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-48-bvanassche@acm.org


Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3ae72869
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -302,7 +302,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
			submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
			bh = bh->b_this_page;
		}

@@ -394,7 +394,7 @@ static int read_page(struct file *file, unsigned long index,
			atomic_inc(&bitmap->pending_writes);
			set_buffer_locked(bh);
			set_buffer_mapped(bh);
			submit_bh(REQ_OP_READ, 0, bh);
			submit_bh(REQ_OP_READ, bh);
		}
		blk_cur++;
		bh = bh->b_this_page;
+27 −26
Original line number Diff line number Diff line
@@ -52,8 +52,8 @@
#include "internal.h"

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
			 struct buffer_head *bh, struct writeback_control *wbc);
static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			 struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

@@ -562,7 +562,7 @@ void write_boundary_block(struct block_device *bdev,
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
			ll_rw_block(REQ_OP_WRITE, 1, &bh);
		put_bh(bh);
	}
}
@@ -1174,7 +1174,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(REQ_OP_READ, 0, bh);
		submit_bh(REQ_OP_READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
@@ -1342,7 +1342,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
		brelse(bh);
	}
}
@@ -1353,7 +1353,7 @@ void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
	if (likely(bh)) {
		ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
		ll_rw_block(REQ_OP_READ | REQ_RAHEAD, 1, &bh);
		brelse(bh);
	}
}
@@ -1804,7 +1804,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
@@ -1858,7 +1858,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			clear_buffer_dirty(bh);
			submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, wbc);
			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
			nr_underway++;
		}
		bh = next;
@@ -2033,7 +2033,7 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		     (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			ll_rw_block(REQ_OP_READ, 1, &bh);
			*wait_bh++=bh;
		}
	}
@@ -2334,7 +2334,7 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
		if (buffer_uptodate(bh))
			end_buffer_async_read(bh, 1);
		else
			submit_bh(REQ_OP_READ, 0, bh);
			submit_bh(REQ_OP_READ, bh);
	}
	return 0;
}
@@ -2665,7 +2665,7 @@ int nobh_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		if (block_start < from || block_end > to) {
			lock_buffer(bh);
			bh->b_end_io = end_buffer_read_nobh;
			submit_bh(REQ_OP_READ, 0, bh);
			submit_bh(REQ_OP_READ, bh);
			nr_reads++;
		}
	}
@@ -2915,7 +2915,7 @@ int block_truncate_page(struct address_space *mapping,

	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		ll_rw_block(REQ_OP_READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
@@ -2994,9 +2994,10 @@ static void end_bio_bh_io_sync(struct bio *bio)
	bio_put(bio);
}

static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
			 struct buffer_head *bh, struct writeback_control *wbc)
static int submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
			 struct writeback_control *wbc)
{
	const enum req_op op = opf & REQ_OP_MASK;
	struct bio *bio;

	BUG_ON(!buffer_locked(bh));
@@ -3012,11 +3013,11 @@ static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
		clear_buffer_write_io_error(bh);

	if (buffer_meta(bh))
		op_flags |= REQ_META;
		opf |= REQ_META;
	if (buffer_prio(bh))
		op_flags |= REQ_PRIO;
		opf |= REQ_PRIO;

	bio = bio_alloc(bh->b_bdev, 1, op | op_flags, GFP_NOIO);
	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);

	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);

@@ -3040,9 +3041,9 @@ static int submit_bh_wbc(enum req_op op, blk_opf_t op_flags,
	return 0;
}

int submit_bh(enum req_op op, blk_opf_t op_flags, struct buffer_head *bh)
int submit_bh(blk_opf_t opf, struct buffer_head *bh)
{
	return submit_bh_wbc(op, op_flags, bh, NULL);
	return submit_bh_wbc(opf, bh, NULL);
}
EXPORT_SYMBOL(submit_bh);

@@ -3072,9 +3073,9 @@ EXPORT_SYMBOL(submit_bh);
 * All of the buffers must be for the same device, and must also be a
 * multiple of the current approved size for the device.
 */
void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
		 struct buffer_head *bhs[])
void ll_rw_block(const blk_opf_t opf, int nr, struct buffer_head *bhs[])
{
	const enum req_op op = opf & REQ_OP_MASK;
	int i;

	for (i = 0; i < nr; i++) {
@@ -3086,14 +3087,14 @@ void ll_rw_block(enum req_op op, blk_opf_t op_flags, int nr,
			if (test_clear_buffer_dirty(bh)) {
				bh->b_end_io = end_buffer_write_sync;
				get_bh(bh);
				submit_bh(op, op_flags, bh);
				submit_bh(opf, bh);
				continue;
			}
		} else {
			if (!buffer_uptodate(bh)) {
				bh->b_end_io = end_buffer_read_sync;
				get_bh(bh);
				submit_bh(op, op_flags, bh);
				submit_bh(opf, bh);
				continue;
			}
		}
@@ -3111,7 +3112,7 @@ void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
	}
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(REQ_OP_WRITE, op_flags, bh);
	submit_bh(REQ_OP_WRITE | op_flags, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);

@@ -3138,7 +3139,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)

		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
		ret = submit_bh(REQ_OP_WRITE | op_flags, bh);
		wait_on_buffer(bh);
		if (!ret && !buffer_uptodate(bh))
			ret = -EIO;
@@ -3366,7 +3367,7 @@ int bh_submit_read(struct buffer_head *bh)

	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, 0, bh);
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return 0;
+1 −1
Original line number Diff line number Diff line
@@ -668,7 +668,7 @@ static void ext4_fc_submit_bh(struct super_block *sb, bool is_tail)
	set_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = ext4_end_buffer_io_sync;
	submit_bh(REQ_OP_WRITE, write_flags, bh);
	submit_bh(REQ_OP_WRITE | write_flags, bh);
	EXT4_SB(sb)->s_fc_bh = NULL;
}

+1 −1
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
	lock_buffer(bh);
	bh->b_end_io = end_buffer_write_sync;
	get_bh(bh);
	submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
	submit_bh(REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, bh);
	wait_on_buffer(bh);
	sb_end_write(sb);
	if (unlikely(!buffer_uptodate(bh)))
+3 −3
Original line number Diff line number Diff line
@@ -171,7 +171,7 @@ static inline void __ext4_read_bh(struct buffer_head *bh, int op_flags,

	bh->b_end_io = end_io ? end_io : end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, op_flags, bh);
	submit_bh(REQ_OP_READ | op_flags, bh);
}

void ext4_read_bh_nowait(struct buffer_head *bh, int op_flags,
@@ -5939,8 +5939,8 @@ static int ext4_commit_super(struct super_block *sb)
	/* Clear potential dirty bit if it was journalled update */
	clear_buffer_dirty(sbh);
	sbh->b_end_io = end_buffer_write_sync;
	submit_bh(REQ_OP_WRITE,
		  REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
	submit_bh(REQ_OP_WRITE | REQ_SYNC |
		  (test_opt(sb, BARRIER) ? REQ_FUA : 0), sbh);
	wait_on_buffer(sbh);
	if (buffer_write_io_error(sbh)) {
		ext4_msg(sb, KERN_ERR, "I/O error while writing "
Loading