Commit ec4858e0 authored by Matthew Wilcox (Oracle)
Browse files

mm/mempolicy: Use vma_alloc_folio() in new_page()



Simplify new_page() by unifying the THP and base page cases, and
handle orders other than 0 and HPAGE_PMD_ORDER correctly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent f584b680
+11 −14
Original line number Diff line number Diff line
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 */
static struct page *new_page(struct page *page, unsigned long start)
{
	struct folio *dst, *src = page_folio(page);
	struct vm_area_struct *vma;
	unsigned long address;
	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

	vma = find_vma(current->mm, start);
	while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
		vma = vma->vm_next;
	}

	if (PageHuge(page)) {
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
	if (folio_test_hugetlb(src))
		return alloc_huge_page_vma(page_hstate(&src->page),
				vma, address);
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
					 HPAGE_PMD_ORDER);
		if (!thp)
			return NULL;
		prep_transhuge_page(thp);
		return thp;
	}
	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;

	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
	 * if !vma, vma_alloc_folio() will use task or system default policy
	 */
	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
			vma, address);
	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
			folio_test_large(src));
	return &dst->page;
}
#else