Commit 0f6fb357 authored by Nanyong Sun's avatar Nanyong Sun Committed by Ma Wupeng
Browse files

memcg: support ksm merge any mode per cgroup

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I72R0B


CVE: NA

----------------------------------------------------------------------

Add control file "memory.ksm" to enable ksm per cgroup.
Echoing 1 sets all tasks currently in the cgroup to KSM merge-any
mode, which means KSM gets enabled for all VMAs of a process.
Echoing 0 disables KSM for them and unmerges the already-merged
pages.
Reading the file shows the above state and the KSM-related profit
of this cgroup.

Signed-off-by: default avatarNanyong Sun <sunnanyong@huawei.com>
parent 351ceedb
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -99,6 +99,7 @@ Brief summary of control files.
 memory.kmem.tcp.failcnt             show the number of tcp buf memory usage
				     hits limits
 memory.kmem.tcp.max_usage_in_bytes  show max tcp buf memory usage recorded
 memory.ksm                          set/show ksm merge any mode
==================================== ==========================================

1. History
+4 −0
Original line number Diff line number Diff line
@@ -392,7 +392,11 @@ struct mem_cgroup {
	KABI_RESERVE(3)
	KABI_RESERVE(4)
#endif
#ifdef CONFIG_KSM
	KABI_USE(5, bool ksm_merge_any)
#else
	KABI_RESERVE(5)
#endif
	KABI_RESERVE(6)
	KABI_RESERVE(7)
	KABI_RESERVE(8)
+119 −3
Original line number Diff line number Diff line
@@ -71,6 +71,9 @@
#include <linux/uaccess.h>

#include <trace/events/vmscan.h>
#ifndef __GENKSYMS__
#include <linux/ksm.h>
#endif

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);
@@ -250,10 +253,15 @@ enum res_type {
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

static inline bool __task_is_dying(struct task_struct *task)
{
	return tsk_is_oom_victim(task) || fatal_signal_pending(task) ||
		(task->flags & PF_EXITING);
}

/*
 * Convenience wrapper over __task_is_dying() for the current task.
 *
 * Note: the rendered diff showed both the pre-refactor body and the new
 * call in sequence, leaving an unreachable duplicate return; only the
 * refactored single-line body is kept here.
 */
static inline bool task_is_dying(void)
{
	return __task_is_dying(current);
}

/* Some nice accessors for the vmpressure. */
@@ -5331,6 +5339,104 @@ static ssize_t memcg_high_async_ratio_write(struct kernfs_open_file *of,
	return nbytes;
}

#ifdef CONFIG_KSM
/*
 * Switch KSM merge-any mode on or off for the mm of every thread-group
 * leader currently in @memcg.
 *
 * Returns 0 on success, or the first error from
 * ksm_enable_merge_any()/ksm_disable_merge_any().  NOTE(review): on
 * error the walk stops early, so tasks visited before the failure keep
 * the new mode while the rest do not; the caller is expected to leave
 * memcg->ksm_merge_any unchanged in that case.
 */
static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable)
{
	struct task_struct *task;
	struct mm_struct *mm;
	struct css_task_iter it;
	int ret = 0;

	/* Fast path: the cgroup is already in the requested state. */
	if (enable == READ_ONCE(memcg->ksm_merge_any))
		return 0;

	/* CSS_TASK_ITER_PROCS: iterate thread-group leaders only. */
	css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it);
	while (!ret && (task = css_task_iter_next(&it))) {
		/* Skip tasks that are already being torn down. */
		if (__task_is_dying(task))
			continue;

		/* NULL for kernel threads or if the mm is already gone. */
		mm = get_task_mm(task);
		if (!mm)
			continue;

		/* Killable lock acquisition; on interruption just skip this task. */
		if (mmap_write_lock_killable(mm)) {
			mmput(mm);
			continue;
		}

		/* Flip merge-any mode with mmap_lock held for write. */
		if (enable)
			ret = ksm_enable_merge_any(mm);
		else
			ret = ksm_disable_merge_any(mm);

		mmap_write_unlock(mm);
		mmput(mm);
	}
	css_task_iter_end(&it);

	return ret;
}

static int memory_ksm_show(struct seq_file *m, void *v)
{
	unsigned long ksm_merging_pages = 0;
	unsigned long ksm_rmap_items = 0;
	long ksm_process_profits = 0;
	unsigned int tasks = 0;
	struct task_struct *task;
	struct mm_struct *mm;
	struct css_task_iter it;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	css_task_iter_start(&memcg->css, CSS_TASK_ITER_PROCS, &it);
	while ((task = css_task_iter_next(&it))) {
		mm = get_task_mm(task);
		if (!mm)
			continue;

		if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
			tasks++;

		ksm_rmap_items += mm->ksm_rmap_items;
		ksm_merging_pages += mm->ksm_merging_pages;
		ksm_process_profits += ksm_process_profit(mm);
		mmput(mm);
	}
	css_task_iter_end(&it);

	seq_printf(m, "merge any state: %d\n", READ_ONCE(memcg->ksm_merge_any));
	seq_printf(m, "merge any tasks: %u\n", tasks);
	seq_printf(m, "ksm_rmap_items %lu\n", ksm_rmap_items);
	seq_printf(m, "ksm_merging_pages %lu\n", ksm_merging_pages);
	seq_printf(m, "ksm_process_profits %ld\n", ksm_process_profits);
	return 0;
}

/*
 * "memory.ksm" write handler: "1" enables KSM merge-any mode for every
 * task currently in the cgroup, "0" disables it and unmerges pages.
 * memcg->ksm_merge_any is updated only after every task was switched
 * successfully.
 *
 * Returns @nbytes on success or a negative errno.
 */
static ssize_t memory_ksm_write(struct kernfs_open_file *of, char *buf,
					size_t nbytes, loff_t off)
{
	bool enable;
	int err;
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	/*
	 * strstrip() never returns NULL for a non-NULL input, so the
	 * previous "if (!buf)" check was dead code; kstrtobool() below
	 * already rejects an empty string with -EINVAL.
	 */
	buf = strstrip(buf);

	err = kstrtobool(buf, &enable);
	if (err)
		return err;

	err = memcg_set_ksm_for_tasks(memcg, enable);
	if (err)
		return err;

	WRITE_ONCE(memcg->ksm_merge_any, enable);

	return nbytes;
}
#endif /* CONFIG_KSM */

#ifdef CONFIG_CGROUP_V1_WRITEBACK
#include "../kernel/cgroup/cgroup-internal.h"

@@ -5615,6 +5721,14 @@ static struct cftype mem_cgroup_legacy_files[] = {
		.seq_show = wb_blkio_show,
		.write = wb_blkio_write,
	},
#endif
#ifdef CONFIG_KSM
	{
		.name = "ksm",
		.flags = CFTYPE_NOT_ON_ROOT,
		.write = memory_ksm_write,
		.seq_show = memory_ksm_show,
	},
#endif
	{ },	/* terminate */
};
@@ -5858,7 +5972,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
		if (parent != root_mem_cgroup)
			memory_cgrp_subsys.broken_hierarchy = true;
	}

#ifdef CONFIG_KSM
	memcg->ksm_merge_any = false;
#endif
	/* The following stuff does not apply to the root */
	if (!parent) {
		root_mem_cgroup = memcg;