Unverified Commit 010e974a authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!15047 mm: Backport some mm patches

Merge Pull Request from: @wedm23414 
 
Catalin Marinas (1):
  mm: kmemleak: fix upper boundary check for physical address objects

Guo Weikang (1):
  mm/memmap: prevent double scanning of memmap by kmemleak

liuye (1):
  mm/vmscan: fix hard LOCKUP in function isolate_lru_folios

yangge (1):
  mm: compaction: use the proper flag to determine watermarks 
 
Link: https://gitee.com/openeuler/kernel/pulls/15047

 

Reviewed-by: default avatarZhang Peng <zhangpeng362@huawei.com>
Signed-off-by: default avatarZhang Peng <zhangpeng362@huawei.com>
parents de1a4d93 f0cd8087
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -401,6 +401,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
/*
 *  MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
 *  MEMBLOCK_ALLOC_ACCESSIBLE
 */
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
+1 −0
Original line number Diff line number Diff line
@@ -235,6 +235,7 @@ enum {
};

#define SWAP_CLUSTER_MAX 32UL
#define SWAP_CLUSTER_MAX_SKIPPED (SWAP_CLUSTER_MAX << 10)
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX

/* Bit flag in swap_map */
+25 −4
Original line number Diff line number Diff line
@@ -2492,7 +2492,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
 */
static enum compact_result
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
				 int highest_zoneidx, unsigned int alloc_flags)
				 int highest_zoneidx, unsigned int alloc_flags,
				 bool async)
{
	unsigned long watermark;

@@ -2501,6 +2502,23 @@ compaction_suit_allocation_order(struct zone *zone, unsigned int order,
			      alloc_flags))
		return COMPACT_SUCCESS;

	/*
	 * For unmovable allocations (without ALLOC_CMA), check if there is enough
	 * free memory in the non-CMA pageblocks. Otherwise compaction could form
	 * the high-order page in CMA pageblocks, which would not help the
	 * allocation to succeed. However, limit the check to costly order async
	 * compaction (such as opportunistic THP attempts) because there is the
	 * possibility that compaction would migrate pages from non-CMA to CMA
	 * pageblock.
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER && async &&
	    !(alloc_flags & ALLOC_CMA)) {
		watermark = low_wmark_pages(zone) + compact_gap(order);
		if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
					   0, zone_page_state(zone, NR_FREE_PAGES)))
			return COMPACT_SKIPPED;
	}

	if (!compaction_suitable(zone, order, highest_zoneidx))
		return COMPACT_SKIPPED;

@@ -2536,7 +2554,8 @@ compact_zone(struct compact_control *cc, struct capture_control *capc)
	if (!is_via_compact_memory(cc->order)) {
		ret = compaction_suit_allocation_order(cc->zone, cc->order,
						       cc->highest_zoneidx,
						       cc->alloc_flags);
						       cc->alloc_flags,
						       cc->mode == MIGRATE_ASYNC);
		if (ret != COMPACT_CONTINUE)
			return ret;
	}
@@ -3047,7 +3066,8 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)

		ret = compaction_suit_allocation_order(zone,
				pgdat->kcompactd_max_order,
				highest_zoneidx, ALLOC_WMARK_MIN);
				highest_zoneidx, ALLOC_WMARK_MIN,
				false);
		if (ret == COMPACT_CONTINUE)
			return true;
	}
@@ -3088,7 +3108,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
			continue;

		ret = compaction_suit_allocation_order(zone,
				cc.order, zoneid, ALLOC_WMARK_MIN);
				cc.order, zoneid, ALLOC_WMARK_MIN,
				false);
		if (ret != COMPACT_CONTINUE)
			continue;

+1 −1
Original line number Diff line number Diff line
@@ -1603,7 +1603,7 @@ static void kmemleak_scan(void)
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
			    PHYS_PFN(phys + object->size) > max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

+6 −2
Original line number Diff line number Diff line
@@ -1642,13 +1642,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
{
	void *ptr;

	/*
	 * Kmemleak will explicitly scan mem_map by traversing all valid
	 * `struct page`s, so memblock does not need to be added to the scan list.
	 */
	if (exact_nid)
		ptr = memblock_alloc_exact_nid_raw(size, align, min_addr,
						   MEMBLOCK_ALLOC_ACCESSIBLE,
						   MEMBLOCK_ALLOC_NOLEAKTRACE,
						   nid);
	else
		ptr = memblock_alloc_try_nid_raw(size, align, min_addr,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 MEMBLOCK_ALLOC_NOLEAKTRACE,
						 nid);

	if (ptr && size > 0)
Loading