Commit ca3b6b0e authored by Chen Wandun's avatar Chen Wandun Committed by Zheng Zengkai
Browse files

mm: support pagecache limit

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4HOXK


CVE: NA

--------------------------------

Add /proc/sys/vm/cache_limit_mbytes to set the page cache limit. This
interface sets the upper limit of page cache usage: if page cache usage
exceeds cache_limit_mbytes, memory reclaim is triggered. The reclaim
size and reclaim interval are decided by the interfaces
/proc/sys/vm/cache_reclaim_s and /proc/sys/vm/cache_reclaim_weight,
both introduced in a previous patch.

Signed-off-by: default avatarChen Wandun <chenwandun@huawei.com>
Reviewed-by: default avatarTong Tiangen <tongtiangen@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 581a69b8
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -77,6 +77,7 @@ Currently, these files are in /proc/sys/vm:
- cache_reclaim_s
- cache_reclaim_weight
- cache_reclaim_enable
- cache_limit_mbytes


admin_reserve_kbytes
@@ -1058,3 +1059,10 @@ cache_reclaim_enable
====================

This is used to switch on/off periodical memory reclaim feature.


cache_limit_mbytes
==================

This is used to set the upper limit of page cache in megabytes.
Page cache will be reclaimed periodically if page cache is over limit.
+2 −1
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@
#ifndef _LINUX_PAGE_CACHE_LIMIT_H
#define _LINUX_PAGE_CACHE_LIMIT_H
#ifdef CONFIG_PAGE_CACHE_LIMIT
extern unsigned long page_cache_shrink_memory(unsigned long nr_to_reclaim);
extern unsigned long page_cache_shrink_memory(unsigned long nr_to_reclaim,
						bool may_swap);
#endif /* CONFIG_PAGE_CACHE_LIMIT */
#endif /* _LINUX_PAGE_CACHE_LIMIT_H */
+70 −1
Original line number Diff line number Diff line
@@ -8,12 +8,14 @@
#include <linux/swap.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include "internal.h"

static int vm_cache_reclaim_s __read_mostly;
static int vm_cache_reclaim_s_max = 43200;
static int vm_cache_reclaim_weight __read_mostly = 1;
static int vm_cache_reclaim_weight_max = 100;
static int vm_cache_reclaim_enable = 1;
static unsigned long vm_cache_limit_mbytes __read_mostly;

static void shrink_shepherd(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(shepherd, shrink_shepherd);
@@ -31,6 +33,31 @@ static unsigned long node_reclaim_num(void)
	return SWAP_CLUSTER_MAX * nr_cpus_node(nid) * vm_cache_reclaim_weight;
}

/*
 * Return true if the file-backed LRU pages (active + inactive file)
 * exceed the configured page cache limit.  vm_cache_limit_mbytes is
 * expressed in megabytes and converted to a page count here.
 */
static bool page_cache_over_limit(void)
{
	unsigned long limit_pages = vm_cache_limit_mbytes *
					((1024 * 1024UL) / PAGE_SIZE);
	unsigned long file_pages = global_node_page_state(NR_ACTIVE_FILE) +
					global_node_page_state(NR_INACTIVE_FILE);

	return file_pages > limit_pages;
}

/*
 * Page-cache-limit reclaim is active only when periodical reclaim is
 * enabled and a non-zero limit has been set via cache_limit_mbytes.
 */
static bool should_reclaim_page_cache(void)
{
	return should_periodical_reclaim() && vm_cache_limit_mbytes != 0;
}

static int cache_reclaim_enable_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
@@ -64,6 +91,37 @@ static int cache_reclaim_sysctl_handler(struct ctl_table *table, int write,
	return ret;
}

/*
 * Handler for /proc/sys/vm/cache_limit_mbytes.
 *
 * Validates a written value against total system RAM (in MiB); an
 * out-of-range write restores the previous limit and returns -EINVAL.
 * After a successful write, page cache is reclaimed synchronously until
 * usage drops below the new limit or MAX_RECLAIM_RETRIES passes have
 * been made.  A pending signal aborts the loop with -EINTR.
 *
 * Fix vs. original: the trailing `if (write)` wrapper was redundant —
 * the `ret || !write` check above already returns for reads, so the
 * reclaim loop can only be reached on a successful write.
 */
static int cache_limit_mbytes_sysctl_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;
	unsigned long vm_cache_limit_mbytes_max;
	unsigned long origin_mbytes = vm_cache_limit_mbytes;
	int nr_retries = MAX_RECLAIM_RETRIES;

	/* Cap the limit at total RAM, converted from pages to MiB. */
	vm_cache_limit_mbytes_max = totalram_pages() >> (20 - PAGE_SHIFT);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		return ret;

	if (vm_cache_limit_mbytes > vm_cache_limit_mbytes_max) {
		/* Reject over-limit values and restore the old setting. */
		vm_cache_limit_mbytes = origin_mbytes;
		return -EINVAL;
	}

	/*
	 * Reclaim synchronously so the new limit takes effect right away.
	 * may_swap=false: only file pages are targeted, never anon.
	 */
	while (should_reclaim_page_cache() && page_cache_over_limit() &&
			nr_retries--) {
		if (signal_pending(current))
			return -EINTR;

		page_cache_shrink_memory(node_reclaim_num(), false);
	}

	return 0;
}

static struct ctl_table ctl_table[] = {
	{
		.procname       = "cache_reclaim_s",
@@ -92,6 +150,13 @@ static struct ctl_table ctl_table[] = {
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "cache_limit_mbytes",
		.data		= &vm_cache_limit_mbytes,
		.maxlen		= sizeof(vm_cache_limit_mbytes),
		.mode		= 0644,
		.proc_handler	= cache_limit_mbytes_sysctl_handler,
	},
	{}
};

@@ -123,7 +188,11 @@ static void shrink_shepherd(struct work_struct *w)

static void shrink_page_work(struct work_struct *w)
{
	/* NOTE(review): diff residue — the next line is the pre-patch call
	 * that this commit removes; the post-patch body starts at the if
	 * below.  Do not read both as one function.
	 */
	page_cache_shrink_memory(node_reclaim_num());
	/*
	 * When a cache limit is configured, reclaim only if page cache is
	 * over the limit, and skip anon pages (may_swap = false).
	 * Otherwise fall back to plain periodical reclaim with swap allowed.
	 */
	if (should_reclaim_page_cache()) {
		if (page_cache_over_limit())
			page_cache_shrink_memory(node_reclaim_num(), false);
	} else if (should_periodical_reclaim())
		page_cache_shrink_memory(node_reclaim_num(), true);
}

static void shrink_shepherd_timer(void)
+3 −2
Original line number Diff line number Diff line
@@ -4595,7 +4595,8 @@ struct page *get_page_from_vaddr(struct mm_struct *mm, unsigned long vaddr)
EXPORT_SYMBOL_GPL(get_page_from_vaddr);

#ifdef CONFIG_PAGE_CACHE_LIMIT
unsigned long page_cache_shrink_memory(unsigned long nr_to_reclaim)
unsigned long page_cache_shrink_memory(unsigned long nr_to_reclaim,
					bool may_swap)
{
	unsigned long nr_reclaimed;
	unsigned int noreclaim_flag;
@@ -4606,7 +4607,7 @@ unsigned long page_cache_shrink_memory(unsigned long nr_to_reclaim)
		.may_writepage = !laptop_mode,
		.nr_to_reclaim = nr_to_reclaim / 2,
		.may_unmap = 1,
		.may_swap = 1,
		.may_swap = may_swap,
		.priority = DEF_PRIORITY,
	};