Commit 8597447d authored by Matthew Wilcox (Oracle)
Browse files

iomap: Convert to release_folio



Change all the filesystems which used iomap_releasepage to use the
new function.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent fa29000b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -768,7 +768,7 @@ static const struct address_space_operations gfs2_aops = {
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.releasepage = iomap_releasepage,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
+10 −12
Original line number Diff line number Diff line
@@ -452,25 +452,23 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);

int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	struct folio *folio = page_folio(page);

	trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(); skip those here.
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list();
	 * skip those here.
	 */
	if (folio_test_dirty(folio) || folio_test_writeback(folio))
		return 0;
		return false;
	iomap_page_release(folio);
	return 1;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
EXPORT_SYMBOL_GPL(iomap_release_folio);

void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
@@ -1483,7 +1481,7 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
		 * Skip the page if it's fully outside i_size, e.g. due to a
		 * truncate operation that's in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 * iomap_release_folio() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long.  If the given
		 * offset is greater than 16TB on a 32-bit system then if we
+1 −1
Original line number Diff line number Diff line
@@ -80,7 +80,7 @@ DEFINE_EVENT(iomap_range_class, name, \
	TP_PROTO(struct inode *inode, loff_t off, u64 len),\
	TP_ARGS(inode, off, len))
DEFINE_RANGE_EVENT(iomap_writepage);
DEFINE_RANGE_EVENT(iomap_releasepage);
DEFINE_RANGE_EVENT(iomap_release_folio);
DEFINE_RANGE_EVENT(iomap_invalidate_folio);
DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail);

+1 −1
Original line number Diff line number Diff line
@@ -568,7 +568,7 @@ const struct address_space_operations xfs_address_space_operations = {
	.readahead		= xfs_vm_readahead,
	.writepages		= xfs_vm_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.releasepage		= iomap_releasepage,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
+1 −1
Original line number Diff line number Diff line
@@ -197,7 +197,7 @@ static const struct address_space_operations zonefs_file_aops = {
	.writepage		= zonefs_writepage,
	.writepages		= zonefs_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.releasepage		= iomap_releasepage,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
Loading