Commit 844fbae6 authored by Zi Yan, committed by Andrew Morton

mm: page_isolation: check specified range for unmovable pages

Enable set_migratetype_isolate() to check a specified range for unmovable
pages during isolation, in preparation for arbitrary-range page isolation.
The functionality will take effect in upcoming commits that adjust the
callers of start_isolate_page_range(), which uses
set_migratetype_isolate().

For example, alloc_contig_range(), which calls start_isolate_page_range(),
accepts unaligned ranges, but because page isolation is currently done at
MAX_ORDER_NR_PAGES granularity, pages that are outside the specified range
but within MAX_ORDER_NR_PAGES alignment might be attempted for isolation,
and the failure to isolate these unrelated pages undesirably fails the
whole operation.
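
To make the failure mode concrete, here is a minimal standalone C sketch
(not kernel code; the 512-page granularity standing in for
MAX_ORDER_NR_PAGES and the PFN values are assumptions for illustration)
of how an unaligned request pulls out-of-range pages into the isolation
scan:

#include <stdio.h>

/* Assumed isolation granularity, standing in for MAX_ORDER_NR_PAGES. */
#define GRANULARITY		512UL
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long start_pfn = 100, end_pfn = 900; /* unaligned request */
	unsigned long iso_start = ALIGN_DOWN(start_pfn, GRANULARITY);
	unsigned long iso_end = ALIGN(end_pfn, GRANULARITY);

	/* Pages in [iso_start, start_pfn) and [end_pfn, iso_end) are outside
	 * the request but were still checked before this patch; an unmovable
	 * page found there failed the whole operation. */
	printf("requested [%lu, %lu), isolated [%lu, %lu)\n",
	       start_pfn, end_pfn, iso_start, iso_end);
	return 0;
}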

Link: https://lkml.kernel.org/r/20220425143118.2850746-3-zi.yan@sent.com
Signed-off-by: Zi Yan <ziy@nvidia.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand <david@redhat.com>
Cc: Eric Ren <renzhengeek@gmail.com>
Cc: kernel test robot <lkp@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b48d8a8e
+34 −13
@@ -16,7 +16,9 @@
 #include <trace/events/page_isolation.h>
 
 /*
- * This function checks whether pageblock includes unmovable pages or not.
+ * This function checks whether the range [start_pfn, end_pfn) includes
+ * unmovable pages or not. The range must fall into a single pageblock and
+ * consequently belong to a single zone.
  *
  * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
@@ -28,12 +30,15 @@
  * cannot get removed (e.g., via memory unplug) concurrently.
  *
  */
-static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
 				int migratetype, int flags)
 {
-	unsigned long iter = 0;
-	unsigned long pfn = page_to_pfn(page);
-	unsigned long offset = pfn % pageblock_nr_pages;
+	struct page *page = pfn_to_page(start_pfn);
+	struct zone *zone = page_zone(page);
+	unsigned long pfn;
+
+	VM_BUG_ON(ALIGN_DOWN(start_pfn, pageblock_nr_pages) !=
+		  ALIGN_DOWN(end_pfn - 1, pageblock_nr_pages));
 
 	if (is_migrate_cma_page(page)) {
 		/*
@@ -47,8 +52,8 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		return page;
 	}
 
-	for (; iter < pageblock_nr_pages - offset; iter++) {
-		page = pfn_to_page(pfn + iter);
+	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+		page = pfn_to_page(pfn);
 
 		/*
 		 * Both, bootmem allocations and memory holes are marked
@@ -85,7 +90,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 			}
 
 			skip_pages = compound_nr(head) - (page - head);
-			iter += skip_pages - 1;
+			pfn += skip_pages - 1;
 			continue;
 		}

@@ -97,7 +102,7 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		 */
 		if (!page_ref_count(page)) {
 			if (PageBuddy(page))
-				iter += (1 << buddy_order(page)) - 1;
+				pfn += (1 << buddy_order(page)) - 1;
 			continue;
 		}

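The three hunks above only switch the loop cursor from a pageblock offset
to a pfn; the skip logic is unchanged: a compound page or a free buddy
block advances the cursor past the whole block instead of visiting each
page. A standalone sketch of that stride pattern (not kernel code;
buddy_order_at() and its values are made up for illustration):

#include <stdio.h>

/* Fake lookup: pretend one order-2 free buddy block sits at pfn 4. */
static unsigned int buddy_order_at(unsigned long pfn)
{
	return pfn == 4 ? 2 : 0;
}

int main(void)
{
	unsigned long pfn, start_pfn = 0, end_pfn = 16;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		unsigned int order = buddy_order_at(pfn);

		if (order) {
			/* mirrors: pfn += (1 << buddy_order(page)) - 1; */
			pfn += (1UL << order) - 1;
			continue;
		}
		printf("checked pfn %lu\n", pfn); /* pfns 4-7 are skipped */
	}
	return 0;
}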
@@ -134,11 +139,18 @@ static struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 	return NULL;
 }
 
-static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags)
+/*
+ * This function sets the pageblock migratetype to isolate if no unmovable
+ * page is present in [start_pfn, end_pfn). The pageblock must intersect with
+ * [start_pfn, end_pfn).
+ */
+static int set_migratetype_isolate(struct page *page, int migratetype, int isol_flags,
+			unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct zone *zone = page_zone(page);
 	struct page *unmovable;
 	unsigned long flags;
+	unsigned long check_unmovable_start, check_unmovable_end;
 
 	spin_lock_irqsave(&zone->lock, flags);

@@ -155,8 +167,16 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	/*
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
+	 *
+	 * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
+	 * to avoid redundant checks.
 	 */
-	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
+	check_unmovable_start = max(page_to_pfn(page), start_pfn);
+	check_unmovable_end = min(ALIGN(page_to_pfn(page) + 1, pageblock_nr_pages),
+				  end_pfn);
+
+	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
+			migratetype, isol_flags);
 	if (!unmovable) {
 		unsigned long nr_pages;
 		int mt = get_pageblock_migratetype(page);
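
The max()/min()/ALIGN() arithmetic above clamps the checked span to the
overlap of the caller's [start_pfn, end_pfn) with the pageblock containing
page. A standalone sketch with assumed values (not kernel code; 512-page
pageblocks, a request of [100, 900), a pageblock starting at PFN 0)
reproduces the computation:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* assumed pageblock size */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long start_pfn = 100, end_pfn = 900; /* requested range */
	unsigned long block_pfn = 0; /* first pfn of this pageblock */

	unsigned long check_start = MAX(block_pfn, start_pfn);
	unsigned long check_end = MIN(ALIGN(block_pfn + 1, PAGEBLOCK_NR_PAGES),
				      end_pfn);

	/* Prints "check [100, 512)": only the in-range part of the block. */
	printf("check [%lu, %lu)\n", check_start, check_end);
	return 0;
}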
@@ -313,7 +333,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page && set_migratetype_isolate(page, migratetype, flags)) {
+		if (page && set_migratetype_isolate(page, migratetype, flags,
+					start_pfn, end_pfn)) {
 			undo_isolate_page_range(start_pfn, pfn, migratetype);
 			return -EBUSY;
 		}