Commit eabf038f authored by Matthew Wilcox (Oracle)

orangefs: Convert launder_page to launder_folio

OrangeFS launders its pages from a number of locations, so add a
small amount of folio usage to its callers where it makes sense.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Tested-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
Tested-by: Mike Marshall <hubcap@omnibond.com> # orangefs
Tested-by: David Howells <dhowells@redhat.com> # afs
parent 15a30ab2
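
For reference, the conversion follows the same pattern used across this series: the address_space operation receives a struct folio instead of a struct page, and the page-based helpers are swapped for their folio equivalents. Below is a minimal sketch of that pattern for a hypothetical filesystem; the myfs_* names (including the stubbed myfs_writepage_locked() helper) are illustrative stand-ins, not part of this patch:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical helper standing in for the filesystem's locked-writepage
 * routine; a real implementation would write the page back here. */
static int myfs_writepage_locked(struct page *page,
				 struct writeback_control *wbc)
{
	return 0;
}

static int myfs_launder_folio(struct folio *folio)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};

	/* folio_wait_writeback() replaces wait_on_page_writeback(). */
	folio_wait_writeback(folio);

	/* folio_clear_dirty_for_io() replaces clear_page_dirty_for_io(). */
	if (folio_clear_dirty_for_io(folio)) {
		/* Writeback here still takes a page, so pass &folio->page. */
		r = myfs_writepage_locked(&folio->page, &wbc);
		folio_end_writeback(folio);
	}
	return r;
}

static const struct address_space_operations myfs_aops = {
	/* .launder_page	= myfs_launder_page,  (old hook) */
	.launder_folio	= myfs_launder_folio,
};

The folio version takes the same locked, dirty object, but makes the laundering unit explicit: one call covers the whole folio, however many pages it spans, rather than assuming a single PAGE_SIZE page.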
+36 −33
@@ -243,7 +243,7 @@ static int orangefs_writepages(struct address_space *mapping,
	return ret;
}

-static int orangefs_launder_page(struct page *);
+static int orangefs_launder_folio(struct folio *);

static void orangefs_readahead(struct readahead_control *rac)
{
@@ -290,14 +290,15 @@ static void orangefs_readahead(struct readahead_control *rac)

static int orangefs_readpage(struct file *file, struct page *page)
{
+	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset into this page */

-	if (PageDirty(page))
-		orangefs_launder_page(page);
+	if (folio_test_dirty(folio))
+		orangefs_launder_folio(folio);

	off = page_offset(page);
	bv.bv_page = page;
@@ -330,6 +331,7 @@ static int orangefs_write_begin(struct file *file,
    void **fsdata)
{
	struct orangefs_write_range *wr;
+	struct folio *folio;
	struct page *page;
	pgoff_t index;
	int ret;
@@ -341,27 +343,28 @@ static int orangefs_write_begin(struct file *file,
		return -ENOMEM;

	*pagep = page;
+	folio = page_folio(page);

-	if (PageDirty(page) && !PagePrivate(page)) {
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
		 * Should be impossible.  If it happens, launder the page
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
-		ret = orangefs_launder_page(page);
+		ret = orangefs_launder_folio(folio);
		if (ret)
			return ret;
	}
-	if (PagePrivate(page)) {
+	if (folio_test_private(folio)) {
		struct orangefs_write_range *wr;
-		wr = (struct orangefs_write_range *)page_private(page);
+		wr = folio_get_private(folio);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;
			goto okay;
		} else {
-			ret = orangefs_launder_page(page);
+			ret = orangefs_launder_folio(folio);
			if (ret)
				return ret;
		}
@@ -375,7 +378,7 @@ static int orangefs_write_begin(struct file *file,
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
okay:
	return 0;
}
@@ -481,7 +484,7 @@ static void orangefs_invalidate_folio(struct folio *folio,
	 * Thus the following runs if wr was modified above.
	 */

-	orangefs_launder_page(&folio->page);
+	orangefs_launder_folio(folio);
}

static int orangefs_releasepage(struct page *page, gfp_t foo)
@@ -494,17 +497,17 @@ static void orangefs_freepage(struct page *page)
	kfree(detach_page_private(page));
}

-static int orangefs_launder_page(struct page *page)
+static int orangefs_launder_folio(struct folio *folio)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};
-	wait_on_page_writeback(page);
-	if (clear_page_dirty_for_io(page)) {
-		r = orangefs_writepage_locked(page, &wbc);
-		end_page_writeback(page);
+	folio_wait_writeback(folio);
+	if (folio_clear_dirty_for_io(folio)) {
+		r = orangefs_writepage_locked(&folio->page, &wbc);
+		folio_end_writeback(folio);
	}
	return r;
}
@@ -637,13 +640,13 @@ static const struct address_space_operations orangefs_address_operations = {
	.invalidate_folio = orangefs_invalidate_folio,
	.releasepage = orangefs_releasepage,
	.freepage = orangefs_freepage,
-	.launder_page = orangefs_launder_page,
+	.launder_folio = orangefs_launder_folio,
	.direct_IO = orangefs_direct_IO,
};

vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
{
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	unsigned long *bitlock = &orangefs_inode->bitlock;
@@ -657,27 +660,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
		goto out;
	}

-	lock_page(page);
-	if (PageDirty(page) && !PagePrivate(page)) {
+	folio_lock(folio);
+	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/*
-	 * Should be impossible.  If it happens, launder the page
+	 * Should be impossible.  If it happens, launder the folio
		 * since we don't know what's dirty.  This will WARN in
		 * orangefs_writepage_locked.
		 */
-		if (orangefs_launder_page(page)) {
+		if (orangefs_launder_folio(folio)) {
			ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
			goto out;
		}
	}
-	if (PagePrivate(page)) {
-		wr = (struct orangefs_write_range *)page_private(page);
+	if (folio_test_private(folio)) {
+		wr = folio_get_private(folio);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
-			wr->pos = page_offset(page);
+			wr->pos = page_offset(vmf->page);
			wr->len = PAGE_SIZE;
			goto okay;
		} else {
-			if (orangefs_launder_page(page)) {
+			if (orangefs_launder_folio(folio)) {
				ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
				goto out;
			}
@@ -688,27 +691,27 @@ vm_fault_t orangefs_page_mkwrite(struct vm_fault *vmf)
		ret = VM_FAULT_LOCKED|VM_FAULT_RETRY;
		goto out;
	}
-	wr->pos = page_offset(page);
+	wr->pos = page_offset(vmf->page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
-	attach_page_private(page, wr);
+	folio_attach_private(folio, wr);
okay:

	file_update_time(vmf->vma->vm_file);
-	if (page->mapping != inode->i_mapping) {
-		unlock_page(page);
+	if (folio->mapping != inode->i_mapping) {
+		folio_unlock(folio);
		ret = VM_FAULT_LOCKED|VM_FAULT_NOPAGE;
		goto out;
	}

	/*
-	 * We mark the page dirty already here so that when freeze is in
+	 * We mark the folio dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
-	 * see the dirty page and writeprotect it again.
+	 * see the dirty folio and writeprotect it again.
	 */
-	set_page_dirty(page);
-	wait_for_stable_page(page);
+	folio_mark_dirty(folio);
+	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
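
For context on when this hook runs: launder_folio is called by the VM when it must invalidate a dirty folio that cannot simply be dropped, for example from invalidate_inode_pages2_range() during direct I/O. A rough paraphrase of the VM-side caller in mm/truncate.c around this kernel version (illustrative, not a verbatim quote):

static int do_launder_folio(struct address_space *mapping,
			    struct folio *folio)
{
	/* Clean folios need no laundering. */
	if (!folio_test_dirty(folio))
		return 0;
	/* Skip if the folio was truncated away or no hook is provided. */
	if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL)
		return 0;
	return mapping->a_ops->launder_folio(folio);
}

This is why OrangeFS also calls its own launder routine directly from readpage, write_begin, invalidate_folio, and page_mkwrite above: any path that finds unexpected dirty state wants the same write-back-and-clean behavior the VM would request.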