Commit f30c7817 authored by Ma Wupeng, committed by Wang Wensheng

mm: Introduce shmem mirrored memory limit for memory reliable

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4SK3S
CVE: NA

--------------------------------

This limit restricts the amount of mirrored memory that shmem can use.
Once the limit is reached, shmem allocations fail with -ENOMEM if
reliable fallback is off, or fall back to the non-mirrored region if
reliable fallback is on.

This limit can be set or read via
/proc/sys/vm/shmem_reliable_bytes_limit.
Its default value is ULONG_MAX, and it can be set to any value from 0
up to the total size of mirrored memory.
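
As an illustration, a minimal userspace sketch that caps shmem mirrored
memory at 1 GiB (hypothetical value; assumes the kernel is booted with
memory reliable enabled):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/shmem_reliable_bytes_limit", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Writes above the total mirrored memory size are rejected
	 * with -EINVAL by the sysctl handler. */
	fprintf(f, "%llu\n", 1ULL << 30);
	fclose(f);
	return 0;
}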

Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
parent 32be46d7
include/linux/mem_reliable.h +8 −0
@@ -17,6 +17,7 @@ DECLARE_STATIC_KEY_FALSE(mem_reliable);
extern bool reliable_enabled;
extern bool shmem_reliable;
extern struct percpu_counter reliable_shmem_used_nr_page;
extern long shmem_reliable_nr_page __read_mostly;
extern bool reliable_allow_fallback;
extern bool pagecache_use_reliable_mem;
extern struct percpu_counter pagecache_reliable_pages;
@@ -88,6 +89,12 @@ static inline void shmem_reliable_page_counter(struct page *page, int nr_page)
		percpu_counter_add(&reliable_shmem_used_nr_page, nr_page);
}

static inline bool mem_reliable_shmem_limit_check(void)
{
	return percpu_counter_read_positive(&reliable_shmem_used_nr_page) <
		shmem_reliable_nr_page;
}

static inline u64 task_reliable_used_pages(void)
{
	s64 nr_pages;
@@ -135,6 +142,7 @@ static inline void reliable_report_meminfo(struct seq_file *m) {}
static inline bool shmem_reliable_is_enabled(void) { return false; }
static inline void shmem_reliable_page_counter(struct page *page,
					       int nr_page) {}
static inline bool mem_reliable_shmem_limit_check(void) { return true; }
static inline void page_cache_prepare_alloc(gfp_t *gfp) {}
static inline bool mem_reliable_status(void) { return false; }
static inline bool page_reliable(struct page *page) { return false; }
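
Note: percpu_counter_read_positive() returns a fast, possibly slightly
stale sum that is clamped at zero, so the check above implements a soft
limit; concurrent allocations can overshoot it by the per-CPU batch
error. A simplified userspace model of the check (assumption: the
percpu counter is replaced by a plain long):

#include <stdbool.h>

static long shmem_reliable_nr_page;	/* limit in pages, set via sysctl */
static long shmem_used_nr_page;		/* pages charged to reliable shmem */

static bool limit_check_model(void)
{
	/* clamp at zero, as percpu_counter_read_positive() does */
	long used = shmem_used_nr_page > 0 ? shmem_used_nr_page : 0;

	return used < shmem_reliable_nr_page;
}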
mm/mem_reliable.c +35 −0
@@ -24,6 +24,7 @@ struct percpu_counter anon_reliable_pages;
static unsigned long reliable_pagecache_max_bytes = ULONG_MAX;
/* reliable user limit for user tasks with reliable flag */
unsigned long task_reliable_limit = ULONG_MAX;
long shmem_reliable_nr_page = ULONG_MAX >> PAGE_SHIFT;

bool mem_reliable_counter_initialized(void)
{
@@ -231,6 +232,31 @@ static int reliable_pagecache_max_bytes_write(struct ctl_table *table,
	return ret;
}

#ifdef CONFIG_SHMEM
static unsigned long sysctl_shmem_reliable_bytes_limit = ULONG_MAX;

static int reliable_shmem_bytes_limit_handler(struct ctl_table *table,
					      int write, void __user *buffer,
					      size_t *length, loff_t *ppos)
{
	unsigned long *data_ptr = (unsigned long *)(table->data);
	unsigned long old = *data_ptr;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (!ret && write) {
		if (*data_ptr > PAGES_TO_B(total_reliable_pages())) {
			*data_ptr = old;
			return -EINVAL;
		}

		shmem_reliable_nr_page = *data_ptr >> PAGE_SHIFT;
	}

	return ret;
}
#endif

static struct ctl_table reliable_ctl_table[] = {
	{
		.procname = "reliable_pagecache_max_bytes",
@@ -246,6 +272,15 @@ static struct ctl_table reliable_ctl_table[] = {
		.mode = 0644,
		.proc_handler = reliable_limit_handler,
	},
#ifdef CONFIG_SHMEM
	{
		.procname = "shmem_reliable_bytes_limit",
		.data = &sysctl_shmem_reliable_bytes_limit,
		.maxlen = sizeof(sysctl_shmem_reliable_bytes_limit),
		.mode = 0644,
		.proc_handler = reliable_shmem_bytes_limit_handler,
	},
#endif
	{}
};
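
The handler stores the limit in bytes, but the fast path compares
pages, so the written value is converted once with a right shift by
PAGE_SHIFT, rounding down to whole pages. A small standalone
demonstration (assumption: 4 KiB pages, i.e. PAGE_SHIFT == 12):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long bytes = (1UL << 30) + 100;	/* 1 GiB + 100 B */
	long pages = bytes >> PAGE_SHIFT;		/* rounds down */

	printf("%lu bytes -> %ld pages\n", bytes, pages);	/* 262144 */
	return 0;
}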

mm/shmem.c +14 −4
@@ -1561,12 +1561,20 @@ static struct page *shmem_alloc_page(gfp_t gfp,
	return page;
}

static inline void shmem_prepare_alloc(gfp_t *gfp_mask)
static inline bool shmem_prepare_alloc(gfp_t *gfp_mask)
{
	if (!shmem_reliable_is_enabled())
		return;
		return true;

	if (mem_reliable_shmem_limit_check()) {
		*gfp_mask |= GFP_RELIABLE;
		return true;
	}

	if (reliable_allow_fb_enabled())
		return true;

	return false;
}

static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
@@ -1585,7 +1593,8 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
	if (!shmem_inode_acct_block(inode, nr))
		goto failed;

	shmem_prepare_alloc(&gfp);
	if (!shmem_prepare_alloc(&gfp))
		goto no_mem;

	if (huge)
		page = shmem_alloc_hugepage(gfp, info, index, node_id);
@@ -1597,6 +1606,7 @@ static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
		return page;
	}

no_mem:
	err = -ENOMEM;
	shmem_inode_unacct_blocks(inode, nr);
failed:
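
Taken together, shmem_prepare_alloc() now yields three outcomes, which
the sketch below models (assumption: plain booleans stand in for the
kernel's static keys and counters):

#include <stdbool.h>

#define GFP_RELIABLE	0x1000000u	/* placeholder bit for illustration */

static bool shmem_reliable_enabled;	/* shmem_reliable_is_enabled() */
static bool reliable_allow_fb;		/* reliable_allow_fb_enabled() */
static bool under_limit;		/* mem_reliable_shmem_limit_check() */

/* Returns false when the allocation must fail with -ENOMEM. */
static bool shmem_prepare_alloc_model(unsigned int *gfp_mask)
{
	if (!shmem_reliable_enabled)
		return true;			/* plain allocation */

	if (under_limit) {
		*gfp_mask |= GFP_RELIABLE;	/* use mirrored memory */
		return true;
	}

	return reliable_allow_fb;		/* fall back, or fail */
}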