Commit 68189fef authored by Matthew Wilcox (Oracle)

fs: Change try_to_free_buffers() to take a folio



All but two of the callers already have a folio; pass a folio into
try_to_free_buffers().  This removes the last user of cancel_dirty_page()
so remove that wrapper function too.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent 73122255
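
The change is mechanical for callers that already have a folio, while the two remaining page-based callers wrap the page at the call site. A minimal sketch of both patterns against the new signature (release_folio_example() and its locals are hypothetical, not part of this commit):

	/* A ->release_folio style caller that already has a folio. */
	static bool release_folio_example(struct folio *folio, gfp_t gfp)
	{
		return try_to_free_buffers(folio);
	}

	/* A caller still holding a struct page converts with page_folio(). */
	if (!try_to_free_buffers(page_folio(page)))
		goto failed;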
+21 −21
@@ -955,7 +955,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 						size);
 			goto done;
 		}
-		if (!try_to_free_buffers(page))
+		if (!try_to_free_buffers(page_folio(page)))
 			goto failed;
 	}
 
@@ -3155,20 +3155,20 @@ int sync_dirty_buffer(struct buffer_head *bh)
 EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
- * try_to_free_buffers() checks if all the buffers on this particular page
+ * try_to_free_buffers() checks if all the buffers on this particular folio
  * are unused, and releases them if so.
  *
  * Exclusion against try_to_free_buffers may be obtained by either
- * locking the page or by holding its mapping's private_lock.
+ * locking the folio or by holding its mapping's private_lock.
  *
- * If the page is dirty but all the buffers are clean then we need to
- * be sure to mark the page clean as well.  This is because the page
+ * If the folio is dirty but all the buffers are clean then we need to
+ * be sure to mark the folio clean as well.  This is because the folio
  * may be against a block device, and a later reattachment of buffers
- * to a dirty page will set *all* buffers dirty.  Which would corrupt
+ * to a dirty folio will set *all* buffers dirty.  Which would corrupt
  * filesystem data on the same device.
  *
- * The same applies to regular filesystem pages: if all the buffers are
- * clean then we set the page clean and proceed.  To do that, we require
+ * The same applies to regular filesystem folios: if all the buffers are
+ * clean then we set the folio clean and proceed.  To do that, we require
  * total exclusion from block_dirty_folio().  That is obtained with
  * private_lock.
  *
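
As the comment above notes, callers must exclude each other by locking the folio or taking the mapping's private_lock. A minimal sketch of the folio-lock variant (drop_folio_buffers() is a hypothetical helper, not part of this commit):

	/* Free the folio's buffers; the folio lock excludes racing callers. */
	static bool drop_folio_buffers(struct folio *folio)
	{
		bool freed;

		folio_lock(folio);
		freed = try_to_free_buffers(folio);
		folio_unlock(folio);
		return freed;
	}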
@@ -3207,40 +3207,40 @@ drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
 	return 0;
 }
 
-int try_to_free_buffers(struct page *page)
+bool try_to_free_buffers(struct folio *folio)
 {
-	struct address_space * const mapping = page->mapping;
+	struct address_space * const mapping = folio->mapping;
 	struct buffer_head *buffers_to_free = NULL;
-	int ret = 0;
+	bool ret = 0;
 
-	BUG_ON(!PageLocked(page));
-	if (PageWriteback(page))
-		return 0;
+	BUG_ON(!folio_test_locked(folio));
+	if (folio_test_writeback(folio))
+		return false;
 
 	if (mapping == NULL) {		/* can this still happen? */
-		ret = drop_buffers(page, &buffers_to_free);
+		ret = drop_buffers(&folio->page, &buffers_to_free);
 		goto out;
 	}
 
 	spin_lock(&mapping->private_lock);
-	ret = drop_buffers(page, &buffers_to_free);
+	ret = drop_buffers(&folio->page, &buffers_to_free);
 
 	/*
 	 * If the filesystem writes its buffers by hand (eg ext3)
-	 * then we can have clean buffers against a dirty page.  We
-	 * clean the page here; otherwise the VM will never notice
+	 * then we can have clean buffers against a dirty folio.  We
+	 * clean the folio here; otherwise the VM will never notice
 	 * that the filesystem did any IO at all.
 	 *
 	 * Also, during truncate, discard_buffer will have marked all
-	 * the page's buffers clean.  We discover that here and clean
-	 * the page also.
+	 * the folio's buffers clean.  We discover that here and clean
+	 * the folio also.
 	 *
 	 * private_lock must be held over this entire operation in order
 	 * to synchronise against block_dirty_folio and prevent the
 	 * dirty bit from being lost.
 	 */
 	if (ret)
-		cancel_dirty_page(page);
+		folio_cancel_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 out:
 	if (buffers_to_free) {
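
With cancel_dirty_page() losing its last user here, the commit also deletes that wrapper. From memory it was a one-line shim over the folio API, roughly the following (exact header location unverified):

	static inline void cancel_dirty_page(struct page *page)
	{
		folio_cancel_dirty(page_folio(page));
	}

Converting this call site to folio_cancel_dirty(folio) also skips the page-to-folio round trip the shim performed.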
+1 −1
@@ -3255,7 +3255,7 @@ static bool ext4_release_folio(struct folio *folio, gfp_t wait)
 	if (journal)
 		return jbd2_journal_try_to_free_buffers(journal, folio);
 	else
-		return try_to_free_buffers(&folio->page);
+		return try_to_free_buffers(folio);
 }
 
 static bool ext4_inode_datasync_dirty(struct inode *inode)
+1 −1
@@ -757,7 +757,7 @@ bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
 	} while (bh != head);
 	gfs2_log_unlock(sdp);
 
-	return try_to_free_buffers(&folio->page);
+	return try_to_free_buffers(folio);
 
 cannot_release:
 	gfs2_log_unlock(sdp);
+1 −1
@@ -124,7 +124,7 @@ static bool hfs_release_folio(struct folio *folio, gfp_t mask)
 		} while (--i && nidx < tree->node_count);
 		spin_unlock(&tree->hash_lock);
 	}
-	return res ? try_to_free_buffers(&folio->page) : false;
+	return res ? try_to_free_buffers(folio) : false;
 }
 
 static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+1 −1
@@ -121,7 +121,7 @@ static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
 		} while (--i && nidx < tree->node_count);
 		spin_unlock(&tree->hash_lock);
 	}
-	return res ? try_to_free_buffers(&folio->page) : false;
+	return res ? try_to_free_buffers(folio) : false;
 }
 
 static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)