Unverified Commit b05af249 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!14117 mm/page_alloc: Separate THP PCP into movable and non-movable categories

Merge Pull Request from: @ci-robot 
 
PR sync from: Jinjiang Tu <tujinjiang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/NAFSZ6N7QEXRIPDYMSWZKCUOS7ZC2EEC/ 
Jinjiang Tu (1):
  mm: fix kabi breakage due to struct per_cpu_pages

yangge (1):
  mm/page_alloc: Separate THP PCP into movable and non-movable
    categories


-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/IAD6H2 
 
Link: https://gitee.com/openeuler/kernel/pulls/14117

 

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Zhang Peng <zhangpeng362@huawei.com>
parents 2c78b27d 2f312afa
Loading
Loading
Loading
Loading
+11 −5
Original line number Diff line number Diff line
@@ -678,20 +678,22 @@ enum zone_watermarks {
};

/*
 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list
 * for THP which will usually be GFP_MOVABLE. Even if it is another type,
 * it should not contribute to serious fragmentation causing THP allocation
 * failures.
 * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists
 * are added for THP. One PCP list is used by MIGRATE_MOVABLE allocations, and
 * the other PCP list is shared by MIGRATE_UNMOVABLE and MIGRATE_RECLAIMABLE.
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define NR_PCP_THP 1
#define NR_PCP_THP_UNMOVABLE 1
#define NR_PCP_ORDERS (PAGE_ALLOC_COSTLY_ORDER + 2)
#else
#define NR_PCP_THP 0
#define NR_PCP_THP_UNMOVABLE 0
#define NR_PCP_ORDERS (PAGE_ALLOC_COSTLY_ORDER + 1)
#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * NR_PCP_ORDERS)
#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
#define NR_PCP_OLD_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
#define NR_PCP_LISTS (NR_PCP_OLD_LISTS + NR_PCP_THP_UNMOVABLE)

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
@@ -728,7 +730,11 @@ struct per_cpu_pages {
	short free_count;	/* consecutive free count */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
#ifndef __GENKSYMS__
	struct list_head lists[NR_PCP_LISTS];
#else
	struct list_head lists[NR_PCP_OLD_LISTS];
#endif
} ____cacheline_aligned_in_smp;

struct per_cpu_zonestat {
+7 −2
Original line number Diff line number Diff line
@@ -528,10 +528,15 @@ static void bad_page(struct page *page, const char *reason)

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	bool __maybe_unused movable;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER + 1) {
		VM_BUG_ON(order != HPAGE_PMD_ORDER);
		return NR_LOWORDER_PCP_LISTS;

		movable = migratetype == MIGRATE_MOVABLE;

		return NR_LOWORDER_PCP_LISTS + movable;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
@@ -545,7 +550,7 @@ static inline int pindex_to_order(unsigned int pindex)
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
	if (pindex >= NR_LOWORDER_PCP_LISTS)
		order = HPAGE_PMD_ORDER;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);