Commit ed03d924 authored by Joonsoo Kim, committed by Linus Torvalds

mm/gup: use a standard migration target allocation callback

There is a well-defined migration target allocation callback,
alloc_migration_target(). Use it in place of gup's open-coded
new_non_cma_page().
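
The pattern in a nutshell: describe the desired target page once in a
struct migration_target_control and hand the generic
alloc_migration_target() callback to migrate_pages(), rather than
open-coding a per-caller allocator. A minimal sketch, assuming the
kernel-internal APIs visible in the diff below; the wrapper
migrate_away() is hypothetical, not part of this commit:

	#include <linux/migrate.h>	/* migrate_pages(), MIGRATE_SYNC, MR_CONTIG_RANGE */
	#include "internal.h"		/* struct migration_target_control, alloc_migration_target() */

	/* Hypothetical wrapper: migrate every page on @page_list away,
	 * with no node preference and movable, no-warn allocations.
	 */
	static int migrate_away(struct list_head *page_list)
	{
		/* The target allocation policy is plain data now... */
		struct migration_target_control mtc = {
			.nid = NUMA_NO_NODE,
			.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
		};

		/* ...interpreted by the common callback instead of a
		 * hand-rolled allocator like new_non_cma_page().
		 */
		return migrate_pages(page_list, alloc_migration_target, NULL,
				     (unsigned long)&mtc, MIGRATE_SYNC,
				     MR_CONTIG_RANGE);
	}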

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Roman Gushchin <guro@fb.com>
Link: http://lkml.kernel.org/r/1596180906-8442-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent bbe88753
mm/gup.c +6 −48
@@ -1609,52 +1609,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non CMA
-	 * allocation memory.
-	 *
-	 * Note that CMA region is prohibited by allocation scope.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-
-		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct mm_struct *mm,
 					unsigned long start,
@@ -1669,6 +1623,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
+	struct migration_target_control mtc = {
+		.nid = NUMA_NO_NODE,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+	};
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1714,8 +1672,8 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, new_non_cma_page,
-				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+			(unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 			/*
 			 * some of the pages failed migration. Do get_user_pages
 			 * without migration.