Commit 113fa05c authored by Christoph Hellwig, committed by David Sterba

btrfs: remove the io_pages field in struct extent_buffer

No need to track the number of pages under I/O now that each
extent_buffer is read and written using a single bio.  For the
read side we need to grab an extra reference for the duration of
the I/O to prevent eviction, though.
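
As a sketch of the lifetime rule this relies on, the following minimal
userspace model (illustrative only; demo_buffer, submit_read and
read_end_io are made-up names, not btrfs code) shows the pattern: the
submitter takes an extra reference before starting asynchronous I/O and
the completion handler drops it, so the buffer cannot be freed while the
I/O is in flight. Because a single completion now covers the whole
buffer, it runs exactly once and no per-page countdown is needed.

/*
 * Minimal userspace sketch of the pattern (illustrative names, not the
 * kernel code): the submitter grabs an extra reference before kicking
 * off asynchronous work, and the completion handler drops it.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_buffer {
	atomic_int refs;
	int uptodate;
};

static void put_buffer(struct demo_buffer *buf)
{
	/* Dropping the last reference frees the buffer, like
	 * free_extent_buffer() would for an extent_buffer. */
	if (atomic_fetch_sub(&buf->refs, 1) == 1) {
		printf("buffer freed\n");
		free(buf);
	}
}

/* Completion runs exactly once for the whole buffer, since a single
 * "bio" covers it; no per-page io_pages countdown is needed. */
static void *read_end_io(void *arg)
{
	struct demo_buffer *buf = arg;

	buf->uptodate = 1;
	put_buffer(buf);	/* drop the reference taken at submission */
	return NULL;
}

static pthread_t submit_read(struct demo_buffer *buf)
{
	pthread_t io_thread;

	/* Extra reference for the duration of the I/O, mirroring the
	 * atomic_inc(&eb->refs) this patch adds before the bio is
	 * allocated and submitted. */
	atomic_fetch_add(&buf->refs, 1);
	pthread_create(&io_thread, NULL, read_end_io, buf);
	return io_thread;
}

int main(void)
{
	struct demo_buffer *buf = malloc(sizeof(*buf));
	pthread_t io_thread;

	atomic_init(&buf->refs, 1);
	buf->uptodate = 0;

	io_thread = submit_read(buf);
	/* The submitter can safely drop its own reference while the I/O
	 * is still in flight; the I/O's reference keeps buf alive. */
	put_buffer(buf);
	pthread_join(io_thread, NULL);
	return 0;
}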

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 31d89399
fs/btrfs/extent_io.c (+5 −12)
@@ -1767,8 +1767,6 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
 		struct page *page = bvec->bv_page;
 		u32 len = bvec->bv_len;

-		atomic_dec(&eb->io_pages);
-
 		if (!uptodate) {
 			btrfs_page_clear_uptodate(fs_info, page, start, len);
 			btrfs_page_set_error(fs_info, page, start, len);
@@ -1791,7 +1789,6 @@ static void prepare_eb_write(struct extent_buffer *eb)
 	unsigned long end;

 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
-	atomic_set(&eb->io_pages, num_extent_pages(eb));

 	/* Set btree blocks beyond nritems with 0 to avoid stale content */
 	nritems = btrfs_header_nritems(eb);
@@ -3235,8 +3232,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)

 static int extent_buffer_under_io(const struct extent_buffer *eb)
 {
-	return (atomic_read(&eb->io_pages) ||
-		test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
+	return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
 		test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
 }

@@ -3372,7 +3368,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,

 	spin_lock_init(&eb->refs_lock);
 	atomic_set(&eb->refs, 1);
-	atomic_set(&eb->io_pages, 0);

 	ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);

@@ -3489,9 +3484,9 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 * adequately protected by the refcount, but the TREE_REF bit and
 	 * its corresponding reference are not. To protect against this
 	 * class of races, we call check_buffer_tree_ref from the codepaths
-	 * which trigger io after they set eb->io_pages. Note that once io is
-	 * initiated, TREE_REF can no longer be cleared, so that is the
-	 * moment at which any such race is best fixed.
+	 * which trigger io. Note that once io is initiated, TREE_REF can no
+	 * longer be cleared, so that is the moment at which any such race is
+	 * best fixed.
 	 */
 	refs = atomic_read(&eb->refs);
 	if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -4062,7 +4057,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	struct bio_vec *bvec;
 	u32 bio_offset = 0;

-	atomic_inc(&eb->refs);
 	eb->read_mirror = bbio->mirror_num;

 	if (uptodate &&
@@ -4077,7 +4071,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
 	}

 	bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
-		atomic_dec(&eb->io_pages);
 		end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset,
 			      bvec->bv_len);
 		bio_offset += bvec->bv_len;
@@ -4100,8 +4093,8 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,

 	clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
 	eb->read_mirror = 0;
-	atomic_set(&eb->io_pages, num_pages);
 	check_buffer_tree_ref(eb);
+	atomic_inc(&eb->refs);

 	bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
 			       REQ_OP_READ | REQ_META, eb->fs_info,
fs/btrfs/extent_io.h (+0 −1)
@@ -79,7 +79,6 @@ struct extent_buffer {
 	struct btrfs_fs_info *fs_info;
 	spinlock_t refs_lock;
 	atomic_t refs;
-	atomic_t io_pages;
 	int read_mirror;
 	struct rcu_head rcu_head;
 	pid_t lock_owner;