Commit 74120095 authored by Ma Wupeng's avatar Ma Wupeng Committed by Zheng Zengkai
Browse files

mm: mem_reliable: Start fallback if no suitable zone found

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S


CVE: NA

--------------------------------

For reliable memory allocations bound to nodes which do not have any
reliable zones, memory allocation will fail and a warning message will
be produced at the end of __alloc_pages_slowpath().

Though this memory allocation can fall back to the movable zone in
check_after_alloc() if fallback is enabled, something should be done to
prevent this pointless warning log.

To solve this problem, fall back to the movable zone if no suitable zone is found.

Signed-off-by: default avatarMa Wupeng <mawupeng1@huawei.com>
Reviewed-by: default avatartong tiangen <tongtiangen@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 573cb6d6
Loading
Loading
Loading
Loading
+28 −2
Original line number Diff line number Diff line
@@ -4674,6 +4674,25 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
}

#ifdef CONFIG_MEMORY_RELIABLE
/*
 * Retarget a GFP_RELIABLE allocation at the zones it would have used
 * without the reliable flag, allowing it to fall back (typically to the
 * movable zone) when no dma/normal zone was found.
 *
 * Updates ac->highest_zoneidx and ac->preferred_zoneref as a side effect.
 * Returns the new preferred zone, or NULL when fallback does not apply
 * (fallback disabled, or the allocation is not GFP_RELIABLE).
 */
static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
						      struct alloc_context *ac)
{
	struct zoneref *zref;

	/* Only reliable allocations may fall back, and only when allowed. */
	if (!reliable_allow_fb_enabled() || !(gfp_mask & GFP_RELIABLE))
		return NULL;

	/* Recompute the preferred zone as if GFP_RELIABLE had not been set. */
	ac->highest_zoneidx = gfp_zone(gfp_mask & ~GFP_RELIABLE);
	zref = first_zones_zonelist(ac->zonelist, ac->highest_zoneidx,
				    ac->nodemask);
	ac->preferred_zoneref = zref;

	return zref->zone;
}

static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
						  struct alloc_context *ac)
{
@@ -4691,6 +4710,11 @@ static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
	}
}
#else
/*
 * CONFIG_MEMORY_RELIABLE disabled: never offer a fallback zone, so the
 * caller proceeds straight to its normal nopage handling.
 */
static inline struct zone *mem_reliable_fallback_zone(gfp_t gfp_mask,
						      struct alloc_context *ac)
{
	return NULL;
}
/* No-op when memory reliability support is compiled out. */
static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
						  struct alloc_context *ac) {}
#endif
@@ -4740,8 +4764,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);
	if (!ac->preferred_zoneref->zone)
	if (!ac->preferred_zoneref->zone) {
		if (!mem_reliable_fallback_zone(gfp_mask, ac))
			goto nopage;
	}

	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);