Commit 5784f09b authored by Matthew Wilcox (Oracle)
Browse files

hfs: Convert to release_folio



Use a folio throughout hfs_release_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
parent e45c20d1
Loading
Loading
Loading
Loading
+12 −11
Original line number Diff line number Diff line
@@ -69,14 +69,15 @@ static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
	return generic_block_bmap(mapping, block, hfs_get_block);
}

static int hfs_releasepage(struct page *page, gfp_t mask)
static bool hfs_release_folio(struct folio *folio, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct inode *inode = folio->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;
	int i;
	bool res = true;

	switch (inode->i_ino) {
	case HFS_EXT_CNID:
@@ -87,27 +88,27 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
		break;
	default:
		BUG();
		return 0;
		return false;
	}

	if (!tree)
		return 0;
		return false;

	if (tree->node_size >= PAGE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT);
		nidx = folio->index >> (tree->node_size_shift - PAGE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
			res = false;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_SHIFT - tree->node_size_shift);
		nidx = folio->index << (PAGE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
@@ -115,7 +116,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				res = false;
				break;
			}
			hfs_bnode_unhash(node);
@@ -123,7 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
	return res ? try_to_free_buffers(&folio->page) : false;
}

static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
@@ -165,7 +166,7 @@ const struct address_space_operations hfs_btree_aops = {
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.releasepage	= hfs_releasepage,
	.release_folio	= hfs_release_folio,
};

const struct address_space_operations hfs_aops = {