Commit e621900a authored by Matthew Wilcox (Oracle)

fs: Convert __set_page_dirty_buffers to block_dirty_folio
Convert all callers; mostly this is just changing the aops to point
at it, but a few implementations need a little more work.
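
For a typical filesystem the conversion is the one-line aops swap sketched
below (a generic illustration of the pattern the hunks in this commit repeat,
not a hunk from the commit itself); the prototype changes from
int (*set_page_dirty)(struct page *) to
bool (*dirty_folio)(struct address_space *, struct folio *):

-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,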

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent af7afdc7
block/fops.c  +1 −1
@@ -429,7 +429,7 @@ static int blkdev_writepages(struct address_space *mapping,
 }
 
 const struct address_space_operations def_blk_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= blkdev_readpage,
 	.readahead	= blkdev_readahead,
fs/adfs/inode.c  +1 −1
@@ -73,7 +73,7 @@ static sector_t _adfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 static const struct address_space_operations adfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= adfs_readpage,
 	.writepage	= adfs_writepage,
fs/affs/file.c  +2 −2
@@ -453,7 +453,7 @@ static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations affs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = affs_readpage,
 	.writepage = affs_writepage,
@@ -835,7 +835,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
 }
 
 const struct address_space_operations affs_aops_ofs = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage = affs_readpage_ofs,
 	//.writepage = affs_writepage_ofs,
fs/bfs/file.c  +1 −1
@@ -188,7 +188,7 @@ static sector_t bfs_bmap(struct address_space *mapping, sector_t block)
 }
 
 const struct address_space_operations bfs_aops = {
-	.set_page_dirty	= __set_page_dirty_buffers,
+	.dirty_folio	= block_dirty_folio,
 	.invalidate_folio = block_invalidate_folio,
 	.readpage	= bfs_readpage,
 	.writepage	= bfs_writepage,
fs/buffer.c  +15 −18
@@ -613,17 +613,14 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * FIXME: may need to call ->reservepage here as well.  That's rather up to the
  * address_space though.
  */
-int __set_page_dirty_buffers(struct page *page)
+bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	int newly_dirty;
-	struct address_space *mapping = page_mapping(page);
-
-	if (unlikely(!mapping))
-		return !TestSetPageDirty(page);
+	struct buffer_head *head;
+	bool newly_dirty;
 
 	spin_lock(&mapping->private_lock);
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
+	head = folio_buffers(folio);
+	if (head) {
 		struct buffer_head *bh = head;
 
 		do {
@@ -635,21 +632,21 @@ int __set_page_dirty_buffers(struct page *page)
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	lock_page_memcg(page);
-	newly_dirty = !TestSetPageDirty(page);
+	folio_memcg_lock(folio);
+	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->private_lock);
 
 	if (newly_dirty)
-		__set_page_dirty(page, mapping, 1);
+		__folio_mark_dirty(folio, mapping, 1);
 
-	unlock_page_memcg(page);
+	folio_memcg_unlock(folio);
 
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
 	return newly_dirty;
 }
-EXPORT_SYMBOL(__set_page_dirty_buffers);
+EXPORT_SYMBOL(block_dirty_folio);
 
 /*
  * Write out and wait upon a list of buffers.
@@ -1548,7 +1545,7 @@ EXPORT_SYMBOL(block_invalidate_folio);
 
 /*
  * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
+ * block_dirty_folio() via private_lock.  try_to_free_buffers
  * is already excluded via the page lock.
  */
 void create_empty_buffers(struct page *page,
@@ -1723,12 +1720,12 @@ int __block_write_full_page(struct inode *inode, struct page *page,
 					(1 << BH_Dirty)|(1 << BH_Uptodate));
 
 	/*
-	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
+	 * Be very careful.  We have no exclusion from block_dirty_folio
 	 * here, and the (potentially unmapped) buffers may become dirty at
 	 * any time.  If a buffer becomes dirty here after we've inspected it
 	 * then we just miss that fact, and the page stays dirty.
 	 *
-	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
+	 * Buffers outside i_size may be dirtied by block_dirty_folio;
 	 * handle that here by just cleaning them.
 	 */
 
@@ -3182,7 +3179,7 @@ EXPORT_SYMBOL(sync_dirty_buffer);
  *
  * The same applies to regular filesystem pages: if all the buffers are
  * clean then we set the page clean and proceed.  To do that, we require
- * total exclusion from __set_page_dirty_buffers().  That is obtained with
+ * total exclusion from block_dirty_folio().  That is obtained with
  * private_lock.
  *
  * try_to_free_buffers() is non-blocking.
@@ -3249,7 +3246,7 @@ int try_to_free_buffers(struct page *page)
 	 * the page also.
 	 *
 	 * private_lock must be held over this entire operation in order
-	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * to synchronise against block_dirty_folio and prevent the
 	 * dirty bit from being lost.
 	 */
 	if (ret)
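
Not from the commit, but a minimal sketch of the exclusion protocol the
comments above describe: the dirtying and freeing paths both serialize on
mapping->private_lock, so a buffer cannot be redirtied while
try_to_free_buffers() is deciding whether to drop it.

	/* Dirtying side, as in block_dirty_folio() above: */
	spin_lock(&mapping->private_lock);
	/* walk folio_buffers(folio), setting each buffer_head dirty */
	spin_unlock(&mapping->private_lock);

	/* Freeing side, as in try_to_free_buffers(): */
	spin_lock(&mapping->private_lock);
	/* drop the buffers only if none of them are dirty or locked */
	spin_unlock(&mapping->private_lock);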