Commit 03778c6d authored by Liu Shixin's avatar Liu Shixin Committed by openeuler-sync-bot
Browse files

mm/dynamic_hugetlb: skip unexpected migration

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8YUPE


CVE: NA

--------------------------------

With the dynamic hugetlb feature, some memory is isolated in the dynamic pool.
When trying to compact memory, the kcompactd thread scans all memory; although
some memory belongs to the dynamic pool, kcompactd still tries to migrate it.
After migration, this memory is freed to the dynamic pool rather than to the
buddy system, which results in a decrease of the free pages in the buddy
system.

Since it is unnecessary to compact the memory in the dynamic pool, skip
migrating it to fix the problem.

The same problem also exists in alloc_contig_range(), offline_pages() and
numa balancing. Skip migration in these three scenarios as well.

In addition to this, we have to consider the migration of hugepages: if a
hugepage is from the dynamic pool, we should not allow it to be migrated.

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
(cherry picked from commit 4122daea)
parent 9c2e42d5
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -184,12 +184,12 @@ static inline
void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
{
}
#endif

/*
 * Stub for !CONFIG_DYNAMIC_HUGETLB builds: with the feature compiled out,
 * no page can ever belong to a dynamic hugetlb pool, so callers (compaction,
 * migration, page isolation) never skip a page on this account.
 */
static inline
bool page_belong_to_dynamic_hugetlb(struct page *page)
{
	return false;
}
#endif

#endif /* CONFIG_DYNAMIC_HUGETLB */
#endif /* __LINUX_DYNAMIC_HUGETLB_H */
+4 −0
Original line number Diff line number Diff line
@@ -23,6 +23,7 @@
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include <linux/dynamic_hugetlb.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
@@ -1870,6 +1871,9 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
		if (!page)
			continue;

		if (page_belong_to_dynamic_hugetlb(page))
			continue;

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
+7 −0
Original line number Diff line number Diff line
@@ -49,6 +49,7 @@
#include <linux/sched/mm.h>
#include <linux/ptrace.h>
#include <linux/oom.h>
#include <linux/dynamic_hugetlb.h>

#include <asm/tlbflush.h>

@@ -1564,6 +1565,9 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
	if (PageHuge(page)) {
		struct hstate *h = page_hstate(compound_head(page));

		if (page_belong_to_dynamic_hugetlb(page))
			return NULL;

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}
@@ -2040,6 +2044,9 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
	if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
		return 0;

	if (page_belong_to_dynamic_hugetlb(page))
		return 0;

	if (isolate_lru_page(page))
		return 0;

+3 −1
Original line number Diff line number Diff line
@@ -10,6 +10,7 @@
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/migrate.h>
#include <linux/dynamic_hugetlb.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
@@ -195,7 +196,8 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
	     pfn += pageblock_nr_pages) {
		page = __first_valid_page(pfn, pageblock_nr_pages);
		if (page) {
			if (set_migratetype_isolate(page, migratetype, flags)) {
			if (page_belong_to_dynamic_hugetlb(page) ||
			    set_migratetype_isolate(page, migratetype, flags)) {
				undo_pfn = pfn;
				goto undo;
			}