Commit 8ffd74e9 authored by Matthew Wilcox (Oracle)
Browse files

iomap: Convert bio completions to use folios



Use bio_for_each_folio() to iterate over each folio in the bio
instead of iterating over each page.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent cd1e5afe
Loading
Loading
Loading
Loading
+21 −29
Original line number Diff line number Diff line
@@ -161,34 +161,29 @@ static void iomap_set_range_uptodate(struct page *page,
		SetPageUptodate(page);
}

static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
static void iomap_finish_folio_read(struct folio *folio, size_t offset,
		size_t len, int error)
{
	struct page *page = bvec->bv_page;
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
		folio_clear_uptodate(folio);
		folio_set_error(folio);
	} else {
		iomap_set_range_uptodate(page, iop, bvec->bv_offset,
						bvec->bv_len);
		iomap_set_range_uptodate(&folio->page, iop, offset, len);
	}

	if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
		unlock_page(page);
	if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
		folio_unlock(folio);
}

static void
iomap_read_end_io(struct bio *bio)
static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	struct folio_iter fi;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}

@@ -1019,23 +1014,21 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error, unsigned int len)
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len, int error)
{
	struct folio *folio = page_folio(page);
	struct iomap_page *iop = to_iomap_page(folio);

	if (error) {
		SetPageError(page);
		folio_set_error(folio);
		mapping_set_error(inode->i_mapping, error);
	}

	WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);

	if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
		end_page_writeback(page);
		folio_end_writeback(folio);
}

/*
@@ -1054,8 +1047,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;
		struct folio_iter fi;

		/*
		 * For the last bio, bi_private points to the ioend, so we
@@ -1066,10 +1058,10 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error,
					bv->bv_len);
		/* walk all folios in bio, ending page IO on them */
		bio_for_each_folio_all(fi, bio)
			iomap_finish_folio_write(inode, fi.folio, fi.length,
					error);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */