Commit 0079c3b1 authored by Matthew Wilcox (Oracle)
Browse files

btrfs: Convert from set_page_dirty to dirty_folio



Optimise the non-DEBUG case to just call filemap_dirty_folio
directly.  The DEBUG case doesn't actually compile, but convert
it to dirty_folio anyway.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 8fb72b4a
Loading
Loading
Loading
Loading
+13 −12
Original line number Diff line number Diff line
@@ -1014,26 +1014,25 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset,
	}
}

static int btree_set_page_dirty(struct page *page)
{
#ifdef DEBUG
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
static bool btree_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = page_offset(page);
	u64 page_start = folio_pos(folio);

	if (fs_info->sectorsize == PAGE_SIZE) {
		BUG_ON(!PagePrivate(page));
		eb = (struct extent_buffer *)page->private;
		eb = folio_get_private(folio);
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return __set_page_dirty_nobuffers(page);
		return filemap_dirty_folio(mapping, folio);
	}
	ASSERT(PagePrivate(page) && page->private);
	subpage = (struct btrfs_subpage *)page->private;
	subpage = folio_get_private(folio);

	ASSERT(subpage->dirty_bitmap);
	while (cur_bit < BTRFS_SUBPAGE_BITMAP_SIZE) {
@@ -1059,9 +1058,11 @@ static int btree_set_page_dirty(struct page *page)

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits);
	}
#endif
	return __set_page_dirty_nobuffers(page);
	return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
@@ -1070,7 +1071,7 @@ static const struct address_space_operations btree_aops = {
#ifdef CONFIG_MIGRATION
	.migratepage	= btree_migratepage,
#endif
	.set_page_dirty = btree_set_page_dirty,
	.dirty_folio = btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(