Commit 14455eab authored by Cheng Li, committed by Andrew Morton

mm: use nth_page instead of mem_map_offset mem_map_next

To handle the discontiguous case, mem_map_next() has a parameter named
"offset".  As a caller, one may well be confused why "get the next
entry" needs a parameter named "offset".  The other drawback of
mem_map_next() is that callers must keep "iter" and "offset" in sync
themselves, otherwise the iteration may skip or duplicate an entry.  So
use nth_page() instead of mem_map_next().

Also replace mem_map_offset() with nth_page(), per Matthew's comments.
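To make the conversion concrete, this is the shape of the change as it
appears in __destroy_compound_gigantic_page() below (a minimal sketch;
the surrounding declarations are elided):

	/* Before: the caller advances "p" and "i" in lock-step by hand. */
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i))
		p->mapping = NULL;

	/* After: each tail page is computed directly from the head page. */
	for (i = 1; i < nr_pages; i++) {
		p = nth_page(page, i);
		p->mapping = NULL;
	}

nth_page() already copes with mem_map discontiguity: with SPARSEMEM and
no vmemmap it expands to pfn_to_page(page_to_pfn(page) + n), and to
plain pointer arithmetic otherwise, so the MAX_ORDER_NR_PAGES special
cases in the removed helpers are not needed.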

Link: https://lkml.kernel.org/r/1662708669-9395-1-git-send-email-lic121@chinatelecom.cn
Signed-off-by: Cheng Li <lic121@chinatelecom.cn>
Fixes: 69d177c2 ("hugetlbfs: handle pages higher order than MAX_ORDER")
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d83b2d8
mm/hugetlb.c +17 −12
@@ -1306,12 +1306,13 @@ static void __destroy_compound_gigantic_page(struct page *page,
 {
 	int i;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	atomic_set(compound_mapcount_ptr(page), 0);
 	atomic_set(compound_pincount_ptr(page), 0);
 
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
 		p->mapping = NULL;
 		clear_compound_head(p);
 		if (!demote)
@@ -1532,7 +1533,7 @@ static void add_hugetlb_page(struct hstate *h, struct page *page,
 static void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	int i;
-	struct page *subpage = page;
+	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 		return;
@@ -1563,8 +1564,8 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (unlikely(PageHWPoison(page)))
 		hugetlb_clear_page_hwpoison(page);
 
-	for (i = 0; i < pages_per_huge_page(h);
-	     i++, subpage = mem_map_next(subpage, page, i)) {
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		subpage = nth_page(page, i);
 		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
 				1 << PG_active | 1 << PG_private |
@@ -1771,13 +1772,15 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 {
 	int i, j;
 	int nr_pages = 1 << order;
-	struct page *p = page + 1;
+	struct page *p;
 
 	/* we rely on prep_new_huge_page to set the destructor */
 	set_compound_order(page, order);
 	__ClearPageReserved(page);
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
+	for (i = 1; i < nr_pages; i++) {
+		p = nth_page(page, i);
+
 		/*
 		 * For gigantic hugepages allocated through bootmem at
 		 * boot, it's safer to be consistent with the not-gigantic
@@ -1824,14 +1827,16 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 
 out_error:
 	/* undo tail page modifications made above */
-	p = page + 1;
-	for (j = 1; j < i; j++, p = mem_map_next(p, page, j)) {
+	for (j = 1; j < i; j++) {
+		p = nth_page(page, j);
 		clear_compound_head(p);
 		set_page_refcounted(p);
 	}
 	/* need to clear PG_reserved on remaining tail pages  */
-	for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
+	for (; j < nr_pages; j++) {
+		p = nth_page(page, j);
 		__ClearPageReserved(p);
+	}
 	set_compound_order(page, 0);
 #ifdef CONFIG_64BIT
 	page[1].compound_nr = 0;
@@ -6128,7 +6133,7 @@ static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
 
 	for (nr = 0; nr < refs; nr++) {
 		if (likely(pages))
-			pages[nr] = mem_map_offset(page, nr);
+			pages[nr] = nth_page(page, nr);
 		if (vmas)
 			vmas[nr] = vma;
 	}
@@ -6292,7 +6297,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
-			record_subpages_vmas(mem_map_offset(page, pfn_offset),
+			record_subpages_vmas(nth_page(page, pfn_offset),
 					     vma, refs,
 					     likely(pages) ? pages + i : NULL,
 					     vmas ? vmas + i : NULL);
mm/internal.h +0 −28
@@ -638,34 +638,6 @@ static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 }
 #endif /* !CONFIG_MMU */
 
-/*
- * Return the mem_map entry representing the 'offset' subpage within
- * the maximally aligned gigantic page 'base'.  Handle any discontiguity
- * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
- */
-static inline struct page *mem_map_offset(struct page *base, int offset)
-{
-	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
-		return nth_page(base, offset);
-	return base + offset;
-}
-
-/*
- * Iterator over all subpages within the maximally aligned gigantic
- * page 'base'.  Handle any discontiguity in the mem_map.
- */
-static inline struct page *mem_map_next(struct page *iter,
-						struct page *base, int offset)
-{
-	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
-		unsigned long pfn = page_to_pfn(base) + offset;
-		if (!pfn_valid(pfn))
-			return NULL;
-		return pfn_to_page(pfn);
-	}
-	return iter + 1;
-}
-
 /* Memory initialisation debug and verification */
 enum mminit_level {
 	MMINIT_WARNING,
mm/memory.c +10 −11
@@ -5690,11 +5690,11 @@ static void clear_gigantic_page(struct page *page,
 				unsigned int pages_per_huge_page)
 {
 	int i;
-	struct page *p = page;
+	struct page *p;
 
 	might_sleep();
-	for (i = 0; i < pages_per_huge_page;
-	     i++, p = mem_map_next(p, page, i)) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		p = nth_page(page, i);
 		cond_resched();
 		clear_user_highpage(p, addr + i * PAGE_SIZE);
 	}
@@ -5730,13 +5730,12 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
 	struct page *dst_base = dst;
 	struct page *src_base = src;
 
-	for (i = 0; i < pages_per_huge_page; ) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		dst = nth_page(dst_base, i);
+		src = nth_page(src_base, i);
+
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
 	}
 }
 
@@ -5783,10 +5782,10 @@ long copy_huge_page_from_user(struct page *dst_page,
 	void *page_kaddr;
 	unsigned long i, rc = 0;
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
-	struct page *subpage = dst_page;
+	struct page *subpage;
 
-	for (i = 0; i < pages_per_huge_page;
-	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
+	for (i = 0; i < pages_per_huge_page; i++) {
+		subpage = nth_page(dst_page, i);
 		if (allow_pagefault)
 			page_kaddr = kmap(subpage);
 		else