Commit f913cff3 authored by Matthew Wilcox (Oracle)

btrfs: Convert to release_folio



I've only converted the outer layers of the btrfs release_folio paths
to use folios; the use of folios should be pushed further down into
btrfs from here.
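The shape of the conversion is the same at every entry point in the diff below: the ->releasepage hook (struct page *, returning int) becomes ->release_folio (struct folio *, returning bool), the PageWriteback()/PageDirty() macros become folio_test_writeback()/folio_test_dirty(), and the folio's head page is handed via &folio->page to interior helpers that still take pages. A minimal sketch of that outer-layer pattern, assuming a hypothetical unconverted helper legacy_try_release() standing in for try_release_extent_buffer()/try_release_extent_mapping():

/* Hypothetical page-based interior helper, standing in for the
 * not-yet-converted btrfs internals; returns nonzero if the page
 * could be released.  Illustration only, not from this commit. */
static int legacy_try_release(struct page *page)
{
	return 1;
}

/* Folio-aware outer layer, in the release_folio style used below. */
static bool example_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	/* Dirty or writeback folios must never be released. */
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;

	/* Bridge to the page-based interior via the head page. */
	return legacy_try_release(&folio->page) != 0;
}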

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent 508cae68
fs/btrfs/disk-io.c +6 −6
@@ -1005,12 +1005,12 @@ static int btree_writepages(struct address_space *mapping,
 	return btree_write_cache_pages(mapping, wbc);
 }
 
-static int btree_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	if (PageWriteback(page) || PageDirty(page))
-		return 0;
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return false;
 
-	return try_release_extent_buffer(page);
+	return try_release_extent_buffer(&folio->page);
 }
 
 static void btree_invalidate_folio(struct folio *folio, size_t offset,
@@ -1019,7 +1019,7 @@ static void btree_invalidate_folio(struct folio *folio, size_t offset,
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(folio->mapping->host)->io_tree;
 	extent_invalidate_folio(tree, folio, offset);
-	btree_releasepage(&folio->page, GFP_NOFS);
+	btree_release_folio(folio, GFP_NOFS);
 	if (folio_get_private(folio)) {
 		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
 			   "folio private not zero on folio %llu",
@@ -1080,7 +1080,7 @@ static bool btree_dirty_folio(struct address_space *mapping,

 static const struct address_space_operations btree_aops = {
 	.writepages	= btree_writepages,
-	.releasepage	= btree_releasepage,
+	.release_folio	= btree_release_folio,
 	.invalidate_folio = btree_invalidate_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btree_migratepage,
fs/btrfs/extent_io.c +7 −7
@@ -5271,7 +5271,7 @@ int extent_invalidate_folio(struct extent_io_tree *tree,
 }
 
 /*
- * a helper for releasepage, this tests for areas of the page that
+ * a helper for release_folio, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
  * to drop the page.
  */
@@ -5307,7 +5307,7 @@ static int try_release_extent_state(struct extent_io_tree *tree,
 }
 
 /*
- * a helper for releasepage.  As long as there are no locked extents
+ * a helper for release_folio.  As long as there are no locked extents
  * in the range corresponding to the page, both state records and extent
  * map records are removed
  */
@@ -6001,10 +6001,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 	 *
 	 * It is only cleared in two cases: freeing the last non-tree
 	 * reference to the extent_buffer when its STALE bit is set or
-	 * calling releasepage when the tree reference is the only reference.
+	 * calling release_folio when the tree reference is the only reference.
 	 *
 	 * In both cases, care is taken to ensure that the extent_buffer's
-	 * pages are not under io. However, releasepage can be concurrently
+	 * pages are not under io. However, release_folio can be concurrently
 	 * called with creating new references, which is prone to race
 	 * conditions between the calls to check_buffer_tree_ref in those
 	 * codepaths and clearing TREE_REF in try_release_extent_buffer.
@@ -6257,7 +6257,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 		/*
 		 * We can't unlock the pages just yet since the extent buffer
 		 * hasn't been properly inserted in the radix tree, this
-		 * opens a race with btree_releasepage which can free a page
+		 * opens a race with btree_release_folio which can free a page
 		 * while we are still filling in all pages for the buffer and
 		 * we could crash.
 		 */
@@ -6289,7 +6289,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,

 	/*
 	 * Now it's safe to unlock the pages because any calls to
-	 * btree_releasepage will correctly detect that a page belongs to a
+	 * btree_release_folio will correctly detect that a page belongs to a
 	 * live buffer and won't free them prematurely.
 	 */
 	for (i = 0; i < num_pages; i++)
@@ -6659,7 +6659,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
 	eb->read_mirror = 0;
 	atomic_set(&eb->io_pages, num_reads);
 	/*
-	 * It is possible for releasepage to clear the TREE_REF bit before we
+	 * It is possible for release_folio to clear the TREE_REF bit before we
 	 * set io_pages. See check_buffer_tree_ref for a more detailed comment.
 	 */
 	check_buffer_tree_ref(eb);
fs/btrfs/file.c +1 −1
@@ -1323,7 +1323,7 @@ static int prepare_uptodate_page(struct inode *inode,

 		/*
 		 * Since btrfs_read_folio() will unlock the folio before it
-		 * returns, there is a window where btrfs_releasepage() can be
+		 * returns, there is a window where btrfs_release_folio() can be
 		 * called to release the page.  Here we check both inode
 		 * mapping and PagePrivate() to make sure the page was not
 		 * released.
fs/btrfs/inode.c +12 −12
@@ -8172,7 +8172,7 @@ static void btrfs_readahead(struct readahead_control *rac)
 }
 
 /*
- * For releasepage() and invalidate_folio() we have a race window where
+ * For release_folio() and invalidate_folio() we have a race window where
  * folio_end_writeback() is called but the subpage spinlock is not yet released.
  * If we continue to release/invalidate the page, we could cause use-after-free
  * for subpage spinlock.  So this function is to spin and wait for subpage
@@ -8204,22 +8204,22 @@ static void wait_subpage_spinlock(struct page *page)
 	spin_unlock_irq(&subpage->lock);
 }
 
-static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	int ret = try_release_extent_mapping(page, gfp_flags);
+	int ret = try_release_extent_mapping(&folio->page, gfp_flags);
 
 	if (ret == 1) {
-		wait_subpage_spinlock(page);
-		clear_page_extent_mapped(page);
+		wait_subpage_spinlock(&folio->page);
+		clear_page_extent_mapped(&folio->page);
 	}
 	return ret;
 }
 
-static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
+static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
-	if (PageWriteback(page) || PageDirty(page))
-		return 0;
-	return __btrfs_releasepage(page, gfp_flags);
+	if (folio_test_writeback(folio) || folio_test_dirty(folio))
+		return false;
+	return __btrfs_release_folio(folio, gfp_flags);
 }
 
 #ifdef CONFIG_MIGRATION
@@ -8290,7 +8290,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 	 * still safe to wait for ordered extent to finish.
 	 */
 	if (!(offset == 0 && length == folio_size(folio))) {
-		btrfs_releasepage(&folio->page, GFP_NOFS);
+		btrfs_release_folio(folio, GFP_NOFS);
 		return;
 	}

@@ -8414,7 +8414,7 @@ static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
 	ASSERT(!folio_test_ordered(folio));
 	btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio));
 	if (!inode_evicting)
-		__btrfs_releasepage(&folio->page, GFP_NOFS);
+		__btrfs_release_folio(folio, GFP_NOFS);
 	clear_page_extent_mapped(&folio->page);
 }

@@ -11364,7 +11364,7 @@ static const struct address_space_operations btrfs_aops = {
 	.readahead	= btrfs_readahead,
 	.direct_IO	= noop_direct_IO,
 	.invalidate_folio = btrfs_invalidate_folio,
-	.releasepage	= btrfs_releasepage,
+	.release_folio	= btrfs_release_folio,
 #ifdef CONFIG_MIGRATION
 	.migratepage	= btrfs_migratepage,
 #endif
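For orientation, this is roughly how the VM reaches these hooks: reclaim and truncation dispatch to the address_space's ->release_folio after checking that the folio is not under writeback (try_to_release_page() remained as a thin page-based wrapper during this transition). A hedged caller-side sketch, with the buffer-head fallback elided; this approximates the mm-side dispatch of that era and is not a verbatim copy of it:

/* Approximate caller-side sketch; the real dispatch also falls back
 * to buffer-head freeing when no ->release_folio hook is provided. */
static bool sketch_filemap_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space *mapping = folio->mapping;

	/* Never release a folio that is under writeback. */
	if (folio_test_writeback(folio))
		return false;

	/* Filesystems such as btrfs supply ->release_folio. */
	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);

	return false;	/* fallback handling elided in this sketch */
}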