Commit 596500bd authored by Liu Shixin's avatar Liu Shixin
Browse files

dhugetlb: skip unexpected migration

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8YWPT


CVE: NA

--------------------------------

With dynamic hugetlb feature, some memory is isolated in the dynamic pool.
When trying to compact memory, the kcompactd thread will scan all memory.
Although some memory belongs to the dynamic pool, kcompactd still tries to
migrate it. After migration, this memory is freed back to the dynamic pool
rather than to the buddy system, which results in the number of free pages
in the buddy system decreasing.

Since it is unnecessary to compact the memory in the dynamic pool, skip
migrating it to fix the problem.

The same problem also exists in alloc_contig_range(), offline_pages() and
NUMA balancing, so skip migration in these three scenarios as well.

In addition to this, we have to consider the migration of huge pages: if a
hugepage is from the dynamic pool, we should not allow it to be migrated.

Fixes: 0bc0d0d5 ("dhugetlb: backport dynamic hugetlb feature")
Signed-off-by: default avatarLiu Shixin <liushixin2@huawei.com>
parent 096ad875
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -38,9 +38,13 @@ static inline struct page *new_page_nodemask(struct page *page,
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
	if (PageHuge(page)) {
		if (page_belong_to_dynamic_hugetlb(page))
			return NULL;

		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);
	}

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
+3 −0
Original line number Diff line number Diff line
@@ -1271,6 +1271,9 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
		if (!page)
			continue;

		if (page_belong_to_dynamic_hugetlb(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;
+8 −2
Original line number Diff line number Diff line
@@ -1040,10 +1040,13 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
/* page allocation callback for NUMA node migration */
struct page *alloc_new_node_page(struct page *page, unsigned long node)
{
	if (PageHuge(page))
	if (PageHuge(page)) {
		if (page_belong_to_dynamic_hugetlb(page))
			return NULL;

		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
	else if (PageTransHuge(page)) {
	} else if (PageTransHuge(page)) {
		struct page *thp;

		thp = alloc_pages_node(node,
@@ -1217,6 +1220,9 @@ static struct page *new_page(struct page *page, unsigned long start)
	}

	if (PageHuge(page)) {
		if (page_belong_to_dynamic_hugetlb(page))
			return NULL;

		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	} else if (PageTransHuge(page)) {
+3 −0
Original line number Diff line number Diff line
@@ -1907,6 +1907,9 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (page_belong_to_dynamic_hugetlb(page))
		return 0;

	if (isolate_lru_page(page))
		return 0;

+2 −1
Original line number Diff line number Diff line
@@ -220,7 +220,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
			if (page_belong_to_dynamic_hugetlb(page) ||
			    set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}