Commit 78f42660 authored by Matthew Wilcox (Oracle)
Browse files

truncate: Add invalidate_complete_folio2()



Convert invalidate_complete_page2() to invalidate_complete_folio2().
Use filemap_free_folio() to free the page instead of calling ->freepage
manually.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent fae9bc4a
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -229,8 +229,7 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
	page_cache_delete(mapping, folio, shadow);
}

static void filemap_free_folio(struct address_space *mapping,
				struct folio *folio)
void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*freepage)(struct page *);

+1 −0
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,

unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
void filemap_free_folio(struct address_space *mapping, struct folio *folio);
int truncate_inode_folio(struct address_space *mapping, struct folio *folio);

/**
+10 −13
Original line number Diff line number Diff line
@@ -571,31 +571,29 @@ void invalidate_mapping_pagevec(struct address_space *mapping,
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
static int invalidate_complete_folio2(struct address_space *mapping,
					struct folio *folio)
{
	if (page->mapping != mapping)
	if (folio->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
	if (folio_has_private(folio) &&
	    !filemap_release_folio(folio, GFP_KERNEL))
		return 0;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	if (PageDirty(page))
	if (folio_test_dirty(folio))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	BUG_ON(folio_has_private(folio));
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	filemap_free_folio(mapping, folio);
	return 1;
failed:
	xa_unlock_irq(&mapping->i_pages);
@@ -679,8 +677,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,

			ret2 = do_launder_page(mapping, &folio->page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping,
								&folio->page))
				if (!invalidate_complete_folio2(mapping, folio))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)