Commit de74976e authored by Yin Fengwei, committed by Andrew Morton

filemap: add filemap_map_folio_range()

filemap_map_folio_range() maps a partial or full folio.  Compared to the
original filemap_map_pages(), it updates the refcount once per folio
instead of once per page, which gives a minor performance improvement
for large folios.
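
As an aside for readers of the diff below: the per-folio batching amounts
to counting the pages that actually get mapped and doing a single atomic
add at the end, which is what filemap_map_folio_range() does via
folio_ref_add().  The userspace sketch below only illustrates that idea;
the toy_folio type and both map_refcount_* functions are invented for the
example and are not kernel code.

/* Toy illustration of batching one refcount update per folio instead of
 * one per page.  Not kernel code; all names here are made up. */
#include <stdatomic.h>
#include <stdio.h>

struct toy_folio {
	atomic_uint refcount;
	unsigned int nr_pages;
};

/* Old scheme: one atomic increment for every page that gets mapped. */
static void map_refcount_per_page(struct toy_folio *f)
{
	for (unsigned int i = 0; i < f->nr_pages; i++)
		atomic_fetch_add(&f->refcount, 1);
}

/* New scheme: count mapped pages locally, then one atomic add per folio. */
static void map_refcount_per_folio(struct toy_folio *f)
{
	unsigned int ref_count = 0;

	for (unsigned int i = 0; i < f->nr_pages; i++)
		ref_count++;	/* this page was mapped */
	atomic_fetch_add(&f->refcount, ref_count);
}

int main(void)
{
	struct toy_folio f = { .refcount = 1, .nr_pages = 16 };

	map_refcount_per_page(&f);	/* 16 atomic operations */
	map_refcount_per_folio(&f);	/* 1 atomic operation */
	printf("refcount is now %u\n", atomic_load(&f.refcount));
	return 0;
}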

With a will-it-scale.page_fault3-like app (the file write fault testing
changed to read fault testing; an attempt to upstream it to
will-it-scale is at [1]), a 2% performance gain was measured on a
48C/96T Cascade Lake test box with 96 processes running against xfs.

[1]: https://github.com/antonblanchard/will-it-scale/pull/37
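
The testcase itself is the one proposed in [1]; the loop below is only a
rough userspace sketch of that kind of read-fault workload (map a file,
touch one byte per page read-only, unmap, repeat).  The file path,
mapping size and iteration count are arbitrary placeholders.

/* Rough sketch of a page_fault3-style read fault loop; the real
 * testcase is in will-it-scale (see [1]).  Sizes are placeholders. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define MAPLEN	(128UL << 20)	/* 128 MiB of file-backed pages */

int main(void)
{
	char path[] = "/tmp/pf3-read-XXXXXX";
	int fd = mkstemp(path);
	long pagesz = sysconf(_SC_PAGESIZE);
	volatile char sink;

	if (fd < 0 || ftruncate(fd, MAPLEN) < 0) {
		perror("setup");
		return 1;
	}
	unlink(path);	/* the open fd keeps the file alive */

	for (int iter = 0; iter < 100; iter++) {
		char *p = mmap(NULL, MAPLEN, PROT_READ, MAP_SHARED, fd, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* Read one byte per page so every access is a read fault. */
		for (size_t off = 0; off < MAPLEN; off += pagesz)
			sink = p[off];
		munmap(p, MAPLEN);	/* unmap so the next pass faults again */
	}
	printf("done\n");
	return 0;
}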

Link: https://lkml.kernel.org/r/20230802151406.3735276-35-willy@infradead.org


Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9f1f5b60
mm/filemap.c +55 −54
@@ -2168,16 +2168,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
 }
 EXPORT_SYMBOL(filemap_get_folios);
 
-static inline
-bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
-{
-	if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-		return false;
-	if (index >= max)
-		return false;
-	return index < folio_next_index(folio) - 1;
-}
-
 /**
  * filemap_get_folios_contig - Get a batch of contiguous folios
  * @mapping:	The address_space to search
@@ -3436,10 +3426,10 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
 	return false;
 }
 
-static struct folio *next_uptodate_page(struct folio *folio,
-				       struct address_space *mapping,
-				       struct xa_state *xas, pgoff_t end_pgoff)
+static struct folio *next_uptodate_folio(struct xa_state *xas,
+		struct address_space *mapping, pgoff_t end_pgoff)
 {
+	struct folio *folio = xas_next_entry(xas, end_pgoff);
 	unsigned long max_idx;
 
 	do {
@@ -3477,20 +3467,51 @@ static struct folio *next_uptodate_page(struct folio *folio,
 	return NULL;
 }
 
-static inline struct folio *first_map_page(struct address_space *mapping,
-					  struct xa_state *xas,
-					  pgoff_t end_pgoff)
+/*
+ * Map page range [start_page, start_page + nr_pages) of folio.
+ * start_page is gotten from start by folio_page(folio, start)
+ */
+static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
+			struct folio *folio, unsigned long start,
+			unsigned long addr, unsigned int nr_pages)
 {
-	return next_uptodate_page(xas_find(xas, end_pgoff),
-				  mapping, xas, end_pgoff);
-}
+	vm_fault_t ret = 0;
+	struct vm_area_struct *vma = vmf->vma;
+	struct file *file = vma->vm_file;
+	struct page *page = folio_page(folio, start);
+	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
+	unsigned int ref_count = 0, count = 0;
 
-static inline struct folio *next_map_page(struct address_space *mapping,
-					 struct xa_state *xas,
-					 pgoff_t end_pgoff)
-{
-	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
-				  mapping, xas, end_pgoff);
+	do {
+		if (PageHWPoison(page))
+			continue;
+
+		if (mmap_miss > 0)
+			mmap_miss--;
+
+		/*
+		 * NOTE: If there're PTE markers, we'll leave them to be
+		 * handled in the specific fault path, and it'll prohibit the
+		 * fault-around logic.
+		 */
+		if (!pte_none(*vmf->pte))
+			continue;
+
+		if (vmf->address == addr)
+			ret = VM_FAULT_NOPAGE;
+
+		ref_count++;
+		do_set_pte(vmf, page, addr);
+		update_mmu_cache(vma, addr, vmf->pte);
+	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < nr_pages);
+
+	/* Restore the vmf->pte */
+	vmf->pte -= nr_pages;
+
+	folio_ref_add(folio, ref_count);
+	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+	return ret;
 }
 
 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
@@ -3503,12 +3524,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long addr;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
-	struct page *page;
-	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
 	vm_fault_t ret = 0;
+	int nr_pages = 0;
 
 	rcu_read_lock();
-	folio = first_map_page(mapping, &xas, end_pgoff);
+	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
 	if (!folio)
 		goto out;
 
@@ -3525,17 +3545,13 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		goto out;
 	}
 	do {
-again:
-		page = folio_file_page(folio, xas.xa_index);
-		if (PageHWPoison(page))
-			goto unlock;
-
-		if (mmap_miss > 0)
-			mmap_miss--;
+		unsigned long end;
 
 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
 		vmf->pte += xas.xa_index - last_pgoff;
 		last_pgoff = xas.xa_index;
+		end = folio->index + folio_nr_pages(folio) - 1;
+		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
 
 		/*
 		 * NOTE: If there're PTE markers, we'll leave them to be
@@ -3545,32 +3561,17 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		if (!pte_none(ptep_get(vmf->pte)))
 			goto unlock;
 
-		/* We're about to handle the fault */
-		if (vmf->address == addr)
-			ret = VM_FAULT_NOPAGE;
+		ret |= filemap_map_folio_range(vmf, folio,
+				xas.xa_index - folio->index, addr, nr_pages);
 
-		do_set_pte(vmf, page, addr);
-		/* no need to invalidate: a not-present page won't be cached */
-		update_mmu_cache(vma, addr, vmf->pte);
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			folio_ref_inc(folio);
-			goto again;
-		}
-		folio_unlock(folio);
-		continue;
 unlock:
-		if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
-			xas.xa_index++;
-			goto again;
-		}
 		folio_unlock(folio);
 		folio_put(folio);
-	} while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
+		folio = next_uptodate_folio(&xas, mapping, end_pgoff);
+	} while (folio);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
-	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 	return ret;
 }
 EXPORT_SYMBOL(filemap_map_pages);
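
A closing note on the caller side of the new helper, replayed outside the
kernel: filemap_map_pages() now passes a folio-relative start
(xas.xa_index - folio->index) and an nr_pages clamped to the smaller of
the folio's last index and end_pgoff, which is how a partial folio gets
mapped.  The standalone snippet below just reruns that arithmetic with
invented values; nothing in it is kernel code.

/* Replay of the range math used by the new filemap_map_pages() caller:
 *   start    = xa_index - folio->index
 *   nr_pages = min(folio_end, end_pgoff) - xa_index + 1
 * All values below are invented for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned long folio_index = 64;		/* folio covers pgoff 64..79 */
	unsigned long folio_nr_pages = 16;
	unsigned long xa_index = 70;		/* first index visited this round */
	unsigned long end_pgoff = 75;		/* end of the fault-around window */

	unsigned long end = folio_index + folio_nr_pages - 1;		/* 79 */
	unsigned long stop = end < end_pgoff ? end : end_pgoff;	/* min() -> 75 */
	unsigned long nr_pages = stop - xa_index + 1;			/* 6 pages */
	unsigned long start = xa_index - folio_index;			/* page 6 of the folio */

	printf("map pages [%lu, %lu) of the folio (%lu pages)\n",
	       start, start + nr_pages, nr_pages);
	return 0;
}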