Commit 5f0b48de authored by Ma Wupeng, committed by Wang Wensheng

mm: Introduce fallback mechanism for memory reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S


CVE: NA

--------------------------------

Introduce a fallback mechanism for memory reliable: memory allocation
will fall back to the non-mirrored region if the zone's low watermark
is reached, and kswapd will be awakened at that time.

This mechanism is enabled by default and can be disabled by adding
"reliable_debug=F" to the kernel parameters. It relies on
CONFIG_MEMORY_RELIABLE and requires "kernelcore=reliable" in the
kernel parameters.
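
For illustration, a reliable allocation under this scheme could look as
follows (a minimal sketch; the caller is hypothetical, while GFP_RELIABLE
comes from this patch series):

	/*
	 * Request a page from the mirrored (reliable) region. If the
	 * mirrored zones reach their low watermark, the slowpath falls
	 * back to non-mirrored memory and kswapd is woken to reclaim
	 * in the background.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | GFP_RELIABLE, 0);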

Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
parent 8968270e
+3 −1
@@ -4786,9 +4786,11 @@
			See Documentation/admin-guide/cgroup-v1/cpusets.rst.

	reliable_debug=	[ARM64]
			Format: [P]
			Format: [F][,P]
			Only works with CONFIG_MEMORY_RELIABLE and
			"kernelcore=reliable" is configured.
			F: User memory allocation (special user tasks, tmpfs) will
			not fall back to the non-mirrored region on failure.
			P: Page cache does not use the reliable memory.

	reserve=	[KNL,BUGS] Force kernel to ignore I/O ports or memory
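
For example, a mirrored-memory platform could disable both debug switches
at boot (an illustrative command line, assuming the prerequisites above):

	kernelcore=reliable reliable_debug=F,P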
+7 −0
@@ -16,6 +16,7 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);

extern bool reliable_enabled;
extern bool shmem_reliable;
extern bool reliable_allow_fallback;
extern bool pagecache_use_reliable_mem;
extern struct percpu_counter pagecache_reliable_pages;
extern struct percpu_counter anon_reliable_pages;
@@ -104,6 +105,11 @@ static inline bool mem_reliable_should_reclaim(void)

	return false;
}

static inline bool reliable_allow_fb_enabled(void)
{
	return reliable_allow_fallback;
}
#else
#define reliable_enabled 0
#define pagecache_use_reliable_mem 0
@@ -138,6 +144,7 @@ static inline void mem_reliable_out_of_memory(gfp_t gfp_mask,
					      unsigned int order,
					      int preferred_nid,
					      nodemask_t *nodemask) {}
static inline bool reliable_allow_fb_enabled(void) { return false; }
#endif

#endif
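
The #else stub above lets callers test the switch without #ifdef guards.
A minimal, hypothetical helper built on it (mirroring the out_retry path
in the allocator change further down):

	/* Drop GFP_RELIABLE so a failed reliable allocation can be
	 * retried from non-mirrored memory. */
	static bool mem_reliable_retry_fallback(gfp_t *gfp)
	{
		if (!reliable_allow_fb_enabled())
			return false;

		*gfp &= ~GFP_RELIABLE;
		return true;
	}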
+5 −0
@@ -16,6 +16,7 @@ EXPORT_SYMBOL_GPL(mem_reliable);

bool reliable_enabled;
bool shmem_reliable __read_mostly = true;
bool reliable_allow_fallback __read_mostly = true;
bool pagecache_use_reliable_mem __read_mostly = true;
struct percpu_counter pagecache_reliable_pages;
struct percpu_counter anon_reliable_pages;
@@ -294,6 +295,10 @@ static int __init setup_reliable_debug(char *str)
	 */
	for (; *str && *str != ','; str++) {
		switch (*str) {
		case 'F':
			reliable_allow_fallback = false;
			pr_info("disable memory reliable fallback\n");
			break;
		case 'P':
			pagecache_use_reliable_mem = false;
			pr_info("disable page cache use reliable memory\n");
+25 −1
@@ -4668,6 +4668,28 @@ check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
	return false;
}

#ifdef CONFIG_MEMORY_RELIABLE
static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
						  struct alloc_context *ac)
{
	if (!reliable_allow_fb_enabled())
		return;

	if (gfp_mask & __GFP_NOFAIL)
		return;

	if ((ac->highest_zoneidx == ZONE_NORMAL) && (gfp_mask & GFP_RELIABLE)) {
		ac->highest_zoneidx = gfp_zone(gfp_mask & ~GFP_RELIABLE);
		ac->preferred_zoneref = first_zones_zonelist(
			ac->zonelist, ac->highest_zoneidx, ac->nodemask);
		return;
	}
}
#else
static inline void mem_reliable_fallback_slowpath(gfp_t gfp_mask,
						  struct alloc_context *ac) {}
#endif

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
@@ -4719,6 +4741,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);

	mem_reliable_fallback_slowpath(gfp_mask, ac);

	/*
	 * The adjusted alloc_flags might result in immediate success, so try
	 * that first
@@ -5232,7 +5256,7 @@ static inline bool check_after_alloc(gfp_t *gfp, unsigned int order,
	*_page = NULL;

out_retry:
	if (is_global_init(current)) {
	if (reliable_allow_fb_enabled() || is_global_init(current)) {
		*gfp &= ~GFP_RELIABLE;
		return true;
	}
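
Taken together, the slowpath zone widening and the retry above yield the
following overall behavior (an illustrative sketch with a hypothetical
wrapper, not the patch's actual entry point; the __GFP_NOFAIL exclusion
is omitted for brevity):

	static struct page *alloc_reliable_with_fallback(gfp_t gfp,
							 unsigned int order)
	{
		struct page *page;

		gfp |= GFP_RELIABLE;
	retry:
		page = alloc_pages(gfp, order);
		if (!page && (gfp & GFP_RELIABLE) &&
		    reliable_allow_fb_enabled()) {
			/* fall back to the non-mirrored region and retry */
			gfp &= ~GFP_RELIABLE;
			goto retry;
		}

		return page;
	}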