Commit f9f38f78 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Matthew Wilcox (Oracle)
Browse files

mm: refactor check_and_migrate_movable_pages

Remove up to two levels of indentation by using continue statements
and move variables to local scope where possible.

Link: https://lkml.kernel.org/r/20220210072828.2930359-11-hch@lst.de


Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: "Sierra Guiza, Alejandro (Alex)" <alex.sierra@amd.com>

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Chaitanya Kulkarni <kch@nvidia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Felix Kuehling <Felix.Kuehling@amd.com>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Karol Herbst <kherbst@redhat.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: "Pan, Xinhui" <Xinhui.Pan@amd.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent 5cbf9942
Loading
Loading
Loading
Loading
+44 −37
Original line number Diff line number Diff line
@@ -1841,32 +1841,31 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
					    struct page **pages,
					    unsigned int gup_flags)
{
	unsigned long i;
	unsigned long isolation_error_count = 0;
	bool drain_allow = true;
	LIST_HEAD(movable_page_list);
	long ret = 0;
	unsigned long isolation_error_count = 0, i;
	struct page *prev_head = NULL;
	struct page *head;
	struct migration_target_control mtc = {
		.nid = NUMA_NO_NODE,
		.gfp_mask = GFP_USER | __GFP_NOWARN,
	};
	LIST_HEAD(movable_page_list);
	bool drain_allow = true;
	int ret = 0;

	for (i = 0; i < nr_pages; i++) {
		head = compound_head(pages[i]);
		struct page *head = compound_head(pages[i]);

		if (head == prev_head)
			continue;
		prev_head = head;

		if (is_pinnable_page(head))
			continue;

		/*
		 * If we get a movable page, since we are going to be pinning
		 * these entries, try to move them out if possible.
		 * Try to move out any movable page before pinning the range.
		 */
		if (!is_pinnable_page(head)) {
		if (PageHuge(head)) {
			if (!isolate_huge_page(head, &movable_page_list))
				isolation_error_count++;
			} else {
			continue;
		}

		if (!PageLRU(head) && drain_allow) {
			lru_add_drain_all();
			drain_allow = false;
@@ -1878,35 +1877,43 @@ static long check_and_migrate_movable_pages(unsigned long nr_pages,
		}
		list_add_tail(&head->lru, &movable_page_list);
		mod_node_page_state(page_pgdat(head),
						    NR_ISOLATED_ANON +
						    page_is_file_lru(head),
				    NR_ISOLATED_ANON + page_is_file_lru(head),
				    thp_nr_pages(head));
	}
		}
	}

	if (!list_empty(&movable_page_list) || isolation_error_count)
		goto unpin_pages;

	/*
	 * If list is empty, and no isolation errors, means that all pages are
	 * in the correct zone.
	 */
	if (list_empty(&movable_page_list) && !isolation_error_count)
	return nr_pages;

unpin_pages:
	if (gup_flags & FOLL_PIN) {
		unpin_user_pages(pages, nr_pages);
	} else {
		for (i = 0; i < nr_pages; i++)
			put_page(pages[i]);
	}

	if (!list_empty(&movable_page_list)) {
		struct migration_target_control mtc = {
			.nid = NUMA_NO_NODE,
			.gfp_mask = GFP_USER | __GFP_NOWARN,
		};

		ret = migrate_pages(&movable_page_list, alloc_migration_target,
				    NULL, (unsigned long)&mtc, MIGRATE_SYNC,
				    MR_LONGTERM_PIN, NULL);
		if (ret && !list_empty(&movable_page_list))
			putback_movable_pages(&movable_page_list);
		if (ret > 0) /* number of pages not migrated */
			ret = -ENOMEM;
	}

	return ret > 0 ? -ENOMEM : ret;
	if (ret && !list_empty(&movable_page_list))
		putback_movable_pages(&movable_page_list);
	return ret;
}
#else
static long check_and_migrate_movable_pages(unsigned long nr_pages,