Commit 6e7f34eb authored by Pavel Tatashin, committed by Linus Torvalds

mm/gup: check for isolation errors

It is still possible that we pin movable CMA pages if there are
isolation errors and cma_page_list stays empty when we check again.

Check for isolation errors, and return success only when there are no
isolation errors and cma_page_list is empty after checking.

Because isolation errors are transient, we retry indefinitely.
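
To make the retry logic concrete, here is a minimal userspace model of the
new control flow (not kernel code: struct fake_page, try_isolate() and
migrate_isolated() are illustrative stand-ins for pages on cma_page_list,
isolate_lru_page()/isolate_huge_page() and migrate_pages()). It loops until
a pass completes with an empty list and no isolation errors:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES 8

struct fake_page {
	bool in_cma;   /* stands in for is_migrate_cma_page() */
	bool isolated; /* stands in for being on cma_page_list */
};

/* Stand-in for isolate_lru_page()/isolate_huge_page(): may fail transiently. */
static bool try_isolate(struct fake_page *p)
{
	if (rand() % 2)
		return false;
	p->isolated = true;
	return true;
}

/* Stand-in for migrate_pages(): isolated pages leave the CMA region. */
static void migrate_isolated(struct fake_page *pages, int n)
{
	for (int i = 0; i < n; i++) {
		if (pages[i].isolated) {
			pages[i].in_cma = false;
			pages[i].isolated = false;
		}
	}
}

int main(void)
{
	struct fake_page pages[NR_PAGES] = { 0 };
	int pass = 0;

	pages[0].in_cma = pages[3].in_cma = pages[5].in_cma = true;

check_again:
	pass++;
	int isolation_error_count = 0;
	int list_len = 0;

	for (int i = 0; i < NR_PAGES; i++) {
		if (!pages[i].in_cma)
			continue;
		if (try_isolate(&pages[i]))
			list_len++;
		else
			isolation_error_count++;	/* the fix: count failures */
	}

	/*
	 * The old behaviour returned success whenever the list was empty,
	 * even with isolation errors pending; the fix also requires
	 * isolation_error_count == 0.
	 */
	if (list_len == 0 && isolation_error_count == 0) {
		printf("pinned safely after %d pass(es)\n", pass);
		return 0;
	}

	migrate_isolated(pages, NR_PAGES);
	goto check_again;	/* errors are transient: retry indefinitely */
}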

Link: https://lkml.kernel.org/r/20210215161349.246722-5-pasha.tatashin@soleen.com
Fixes: 9a4e9f3b ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sasha Levin <sashal@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Tyler Hicks <tyhicks@linux.microsoft.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f0f44638
mm/gup.c: +34 −26

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1608,8 +1608,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 					struct vm_area_struct **vmas,
 					unsigned int gup_flags)
 {
-	unsigned long i;
-	bool drain_allow = true;
+	unsigned long i, isolation_error_count;
+	bool drain_allow;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
 	struct page *prev_head, *head;
@@ -1620,6 +1620,8 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 
 check_again:
 	prev_head = NULL;
+	isolation_error_count = 0;
+	drain_allow = true;
 	for (i = 0; i < nr_pages; i++) {
 		head = compound_head(pages[i]);
 		if (head == prev_head)
@@ -1631,15 +1633,19 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 		 * of the CMA zone if possible.
 		 */
 		if (is_migrate_cma_page(head)) {
-			if (PageHuge(head))
-				isolate_huge_page(head, &cma_page_list);
-			else {
+			if (PageHuge(head)) {
+				if (!isolate_huge_page(head, &cma_page_list))
+					isolation_error_count++;
+			} else {
 				if (!PageLRU(head) && drain_allow) {
 					lru_add_drain_all();
 					drain_allow = false;
 				}
 
-				if (!isolate_lru_page(head)) {
+				if (isolate_lru_page(head)) {
+					isolation_error_count++;
+					continue;
+				}
 				list_add_tail(&head->lru, &cma_page_list);
 				mod_node_page_state(page_pgdat(head),
 						    NR_ISOLATED_ANON +
@@ -1648,7 +1654,13 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
-				}
 			}
 		}
 	}
 
+	/*
+	 * If list is empty, and no isolation errors, means that all pages are
+	 * in the correct zone.
+	 */
+	if (list_empty(&cma_page_list) && !isolation_error_count)
+		return ret;
+
 	if (!list_empty(&cma_page_list)) {
 		/*
@@ -1669,23 +1681,19 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
 			return ret > 0 ? -ENOMEM : ret;
 		}
 
-		/*
-		 * We did migrate all the pages, Try to get the page references
-		 * again migrating any new CMA pages which we failed to isolate
-		 * earlier.
-		 */
-		ret = __get_user_pages_locked(mm, start, nr_pages,
-						   pages, vmas, NULL,
-						   gup_flags);
-
-		if (ret > 0) {
-			nr_pages = ret;
-			drain_allow = true;
-			goto check_again;
-		}
+		/* We unpinned pages before migration, pin them again */
+		ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
+					      NULL, gup_flags);
+		if (ret <= 0)
+			return ret;
+		nr_pages = ret;
 	}
 
-	return ret;
+	/*
+	 * check again because pages were unpinned, and we also might have
+	 * had isolation errors and need more pages to migrate.
+	 */
+	goto check_again;
 }
 #else
 static long check_and_migrate_cma_pages(struct mm_struct *mm,