Commit 95c4cd05 authored by Matthew Wilcox (Oracle)
Browse files

iomap: Convert to_iomap_page to take a folio



The big comment about only using a head page can go away now that
it takes a folio argument.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent d1bd0b4e
Loading
Loading
Loading
Loading
+15 −17
Original line number Diff line number Diff line
@@ -22,8 +22,8 @@
#include "../internal.h"

/*
 * Structure allocated for each page or THP when block size < page size
 * to track sub-page uptodate status and I/O completions.
 * Structure allocated for each folio when block size < folio size
 * to track sub-folio uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
@@ -32,17 +32,10 @@ struct iomap_page {
	unsigned long		uptodate[];
};

static inline struct iomap_page *to_iomap_page(struct page *page)
static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
	/*
	 * per-block data is stored in the head page.  Callers should
	 * not be dealing with tail pages, and if they are, they can
	 * call thp_head() first.
	 */
	VM_BUG_ON_PGFLAGS(PageTail(page), page);

	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	if (folio_test_private(folio))
		return folio_get_private(folio);
	return NULL;
}

@@ -51,7 +44,8 @@ static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	unsigned int nr_blocks = i_blocks_per_page(inode, page);

	if (iop || nr_blocks <= 1)
@@ -144,7 +138,8 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
@@ -173,7 +168,8 @@ static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		ClearPageUptodate(page);
@@ -438,7 +434,8 @@ int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;
@@ -1012,7 +1009,8 @@ static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error, unsigned int len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		SetPageError(page);