Commit 82e5d378 authored by Joao Martins, committed by Linus Torvalds

mm/hugetlb: refactor subpage recording

For a given hugepage backing a VA, there's a rather inefficient loop which
is solely responsible for storing subpages in the GUP @pages/@vmas arrays.  For
each subpage we check whether it's within range or size of @pages and keep
incrementing @pfn_offset and a couple of other variables per subpage iteration.
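Condensed from the lines this patch removes (see the diff below), the old
loop had roughly this shape, paying all of that bookkeeping once per subpage:

  same_page:
  	if (pages)
  		pages[i] = mem_map_offset(page, pfn_offset);
  	if (vmas)
  		vmas[i] = vma;

  	vaddr += PAGE_SIZE;
  	++pfn_offset;
  	--remainder;
  	++i;
  	++refs;
  	if (vaddr < vma->vm_end && remainder &&
  			pfn_offset < pages_per_huge_page(h))
  		goto same_page;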

Simplify this logic and minimize the cost of each iteration to just storing
the output page/vma.  Instead of incrementing @refs iteratively, we
pre-calculate @refs up front, leaving only a tight loop that stores the
pinned subpages/vmas.
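In code terms (all names as in the diff below), @refs is clamped to whichever
bound is hit first: the end of the huge page, the end of the VMA, or the
number of pages the caller still wants.  The per-subpage increments collapse
into one batched update after the stores:

  	refs = min3(pages_per_huge_page(h) - pfn_offset,
  		    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);

  	record_subpages_vmas(mem_map_offset(page, pfn_offset), vma, refs,
  			     likely(pages) ? pages + i : NULL,
  			     vmas ? vmas + i : NULL);

  	/* One batched update instead of five per-subpage increments. */
  	vaddr += (refs << PAGE_SHIFT);
  	remainder -= refs;
  	i += refs;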

Additionally, retain the existing behaviour of using mem_map_offset() when
recording the subpages, for configurations that don't have a contiguous
mem_map.
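For reference, a sketch of that helper as recalled from mm/internal.h around
this time (not verbatim, worth checking against the tree): it falls back to
pfn arithmetic only when an offset crosses a MAX_ORDER section boundary, and
is a plain pointer addition otherwise:

  	/* Sketch from memory of the mm/internal.h helper, not verbatim. */
  	static inline struct page *mem_map_offset(struct page *base, int offset)
  	{
  		if (unlikely(offset >= MAX_ORDER_NR_PAGES))
  			return nth_page(base, offset);
  		return base + offset;
  	}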

Pinning consequently improves, bringing us close to
{pin,get}_user_pages_fast:

  - 16G with 1G huge page size
  gup_test -f /mnt/huge/file -m 16384 -r 30 -L -S -n 512 -w

PIN_LONGTERM_BENCHMARK: ~12.8k us -> ~5.8k us
PIN_FAST_BENCHMARK: ~3.7k us

Link: https://lkml.kernel.org/r/20210128182632.24562-3-joao.m.martins@oracle.com


Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0fa5bc40
mm/hugetlb.c: +28 −21
@@ -4787,6 +4787,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 	goto out;
 }
 
+static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
+				 int refs, struct page **pages,
+				 struct vm_area_struct **vmas)
+{
+	int nr;
+
+	for (nr = 0; nr < refs; nr++) {
+		if (likely(pages))
+			pages[nr] = mem_map_offset(page, nr);
+		if (vmas)
+			vmas[nr] = vma;
+	}
+}
+
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 struct page **pages, struct vm_area_struct **vmas,
 			 unsigned long *position, unsigned long *nr_pages,
@@ -4916,28 +4930,16 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 		}
 
-		refs = 0;
+		refs = min3(pages_per_huge_page(h) - pfn_offset,
+			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
 
-same_page:
-		if (pages)
-			pages[i] = mem_map_offset(page, pfn_offset);
+		if (pages || vmas)
+			record_subpages_vmas(mem_map_offset(page, pfn_offset),
+					     vma, refs,
+					     likely(pages) ? pages + i : NULL,
+					     vmas ? vmas + i : NULL);
 
-		if (vmas)
-			vmas[i] = vma;
-
-		vaddr += PAGE_SIZE;
-		++pfn_offset;
-		--remainder;
-		++i;
-		++refs;
-		if (vaddr < vma->vm_end && remainder &&
-				pfn_offset < pages_per_huge_page(h)) {
-			/*
-			 * We use pfn_offset to avoid touching the pageframes
-			 * of this compound page.
-			 */
-			goto same_page;
-		} else if (pages) {
+		if (pages) {
 			/*
 			 * try_grab_compound_head() should always succeed here,
 			 * because: a) we hold the ptl lock, and b) we've just
@@ -4948,7 +4950,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			 * any way. So this page must be available at this
 			 * point, unless the page refcount overflowed:
 			 */
-			if (WARN_ON_ONCE(!try_grab_compound_head(pages[i-1],
+			if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
 								 refs,
 								 flags))) {
 				spin_unlock(ptl);
@@ -4957,6 +4959,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 				break;
 			}
 		}
+
+		vaddr += (refs << PAGE_SHIFT);
+		remainder -= refs;
+		i += refs;
+
 		spin_unlock(ptl);
 	}
 	*nr_pages = remainder;