Commit 9bbb63c8 authored by Liu Shixin's avatar Liu Shixin
Browse files

memcg: introduce per-memcg swapin interface

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7CGGT


CVE: NA

--------------------------------

Add a new per-memcg swapin interface that reads a memcg's swapped-out
pages back into memory in advance, improving later access efficiency.
Usage:
	# echo 0 > memory.force_swapin

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
parent eefe54b2
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -78,6 +78,7 @@ Brief summary of control files.
 memory.stat			     show various statistics
 memory.use_hierarchy		     set/show hierarchical account enabled
 memory.force_empty		     trigger forced page reclaim
 memory.force_swapin		     trigger forced swapin of anon pages
 memory.pressure_level		     set memory pressure notifications
 memory.swappiness		     set/show swappiness parameter of vmscan
				     (See sysctl's vm.swappiness)
+1 −0
Original line number Diff line number Diff line
@@ -2650,6 +2650,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
		       struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
		     struct list_head *uf);
extern void force_swapin_vma(struct vm_area_struct *vma);
extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);

extern unsigned long __do_mmap_mm(struct mm_struct *mm, struct file *file,
+19 −0
Original line number Diff line number Diff line
@@ -259,6 +259,25 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

/*
 * Synchronously read the swapped-out pages of @vma back into memory.
 *
 * Anonymous VMAs are walked with swapin_walk_ops so every swap entry is
 * faulted back in; shmem-backed VMAs go through the shmem readahead
 * path instead.  VMAs that madvise() would refuse to touch (per
 * can_madv_lru_vma()) are skipped.  Other file-backed VMAs have nothing
 * in swap and are left alone.
 */
void force_swapin_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (!can_madv_lru_vma(vma))
		return;

	if (!file) {
		walk_page_vma(vma, &swapin_walk_ops, vma);
		lru_add_drain();	/* push swapped-in pages onto the LRU */
	} else if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, vma->vm_start,
					   vma->vm_end, file->f_mapping);
	}
}
#else
/* !CONFIG_SWAP stub: with no swap configured there is nothing to read back. */
void force_swapin_vma(struct vm_area_struct *vma)
{
}
#endif		/* CONFIG_SWAP */

/*
+33 −0
Original line number Diff line number Diff line
@@ -4106,6 +4106,32 @@ static __init int memcg_swap_qos_sysctls_init(void)
}
late_initcall(memcg_swap_qos_sysctls_init);
#endif

/*
 * mem_cgroup_scan_tasks() callback: fault the swapped-out pages of every
 * VMA in @task's address space back into memory.
 *
 * @task: task currently being scanned
 * @arg:  unused
 *
 * Always returns 0 so the scan continues over the remaining tasks.
 */
static int mem_cgroup_task_swapin(struct task_struct *task, void *arg)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;
	struct blk_plug plug;

	/*
	 * Kernel threads and tasks that are already exiting have no mm;
	 * skip them instead of dereferencing a NULL pointer below.
	 */
	if (!mm)
		return 0;

	mmap_read_lock(mm);
	blk_start_plug(&plug);	/* batch the swap-in block I/O */
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		force_swapin_vma(vma);
	blk_finish_plug(&plug);
	mmap_read_unlock(mm);

	return 0;
}

/*
 * Write handler for memory.force_swapin: swap the pages of every task
 * in the cgroup back into memory.  The written value is ignored; any
 * write triggers the scan.  Returns @nbytes to consume the input.
 */
static ssize_t memory_swapin(struct kernfs_open_file *of, char *buf,
			      size_t nbytes, loff_t off)
{
	mem_cgroup_scan_tasks(mem_cgroup_from_css(of_css(of)),
			      mem_cgroup_task_swapin, NULL);

	return nbytes;
}
#endif

#ifdef CONFIG_NUMA
@@ -5800,6 +5826,13 @@ static struct cftype mem_cgroup_legacy_files[] = {
		.name = "reclaim",
		.write = memory_reclaim,
	},
#ifdef CONFIG_MEMCG_SWAP_QOS
	{
		.name = "force_swapin",
		.flags = CFTYPE_NOT_ON_ROOT,
		.write = memory_swapin,
	},
#endif
	{
		.name = "high_async_ratio",
		.flags = CFTYPE_NOT_ON_ROOT,