Unverified Commit 9c27a05e authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!7983 v2 Enhance memcg KSM feature.

Merge Pull Request from: @ci-robot 
 
PR sync from: Jinjiang Tu <tujinjiang@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/R2QURQIJQPJG6EZX4ECWFKL6JE7RYHI7/ 
Enhance memcg KSM feature.

Changelog since v1:
 * update commit log

Jinjiang Tu (3):
  mm/ksm: fix ksm exec support for prctl
  mm/memcontrol: add ksm state for memcg
  mm/memcontrol: enable KSM for tasks moving to new memcg

Stefan Roesch (1):
  mm/ksm: support fork/exec for prctl


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I9GT87 
 
Link: https://gitee.com/openeuler/kernel/pulls/7983

 

Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents b51141f8 decc704f
Loading
Loading
Loading
Loading
+13 −0
Original line number Diff line number Diff line
@@ -65,6 +65,9 @@
#include <linux/compat.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#ifndef __GENKSYMS__
#include <linux/ksm.h>
#endif

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -252,6 +255,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
		goto err_free;
	}

	/*
	 * Need to be called with mmap write lock
	 * held, to avoid race with ksmd.
	 */
	err = ksm_execve(mm);
	if (err)
		goto err_ksm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
@@ -273,6 +284,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
	bprm->p = vma->vm_end - sizeof(void *);
	return 0;
err:
	ksm_exit(mm);
err_ksm:
	mmap_write_unlock(mm);
err_free:
	bprm->vma = NULL;
+13 −0
Original line number Diff line number Diff line
@@ -45,6 +45,14 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
	return 0;
}

/*
 * Re-register a post-exec mm with ksmd when the process opted into
 * "merge any" via prctl; otherwise this is a no-op returning success.
 */
static inline int ksm_execve(struct mm_struct *mm)
{
	if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
		return 0;

	return __ksm_enter(mm);
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
@@ -83,6 +91,11 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
	return 0;
}

/*
 * Stub used when KSM is compiled out (presumably the !CONFIG_KSM
 * branch — the surrounding #ifdef is outside this hunk): always
 * succeeds without touching the mm.
 */
static inline int ksm_execve(struct mm_struct *mm)
{
	return 0;
}

/* No-op stub counterpart of ksm_exit() when KSM is compiled out. */
static inline void ksm_exit(struct mm_struct *mm)
{
}
+4 −0
Original line number Diff line number Diff line
@@ -414,7 +414,11 @@ struct mem_cgroup {
#else
	KABI_RESERVE(7)
#endif
#ifdef CONFIG_KSM
	KABI_USE(8, bool auto_ksm_enabled)
#else
	KABI_RESERVE(8)
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
+4 −2
Original line number Diff line number Diff line
@@ -70,13 +70,15 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23      /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
#define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS	27	/* mm is shared between processes */
#define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
#define MMF_VM_MERGE_ANY	29
#define MMF_VM_MERGE_ANY_MASK	(1 << MMF_VM_MERGE_ANY)

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
				 MMF_DISABLE_THP_MASK)
				 MMF_DISABLE_THP_MASK | MMF_VM_MERGE_ANY_MASK)

#define MMF_VM_MERGE_ANY	29
#endif /* _LINUX_SCHED_COREDUMP_H */
+72 −1
Original line number Diff line number Diff line
@@ -5772,7 +5772,7 @@ static ssize_t memcg_high_async_ratio_write(struct kernfs_open_file *of,
}

#ifdef CONFIG_KSM
static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable)
static int __memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable)
{
	struct task_struct *task;
	struct mm_struct *mm;
@@ -5806,6 +5806,27 @@ static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable)
	return ret;
}

/*
 * Apply the requested KSM auto-enable state to @memcg and every memcg
 * in its subtree, updating the tasks of each via
 * __memcg_set_ksm_for_tasks().  On the first failure the walk stops
 * and the error is returned; memcgs updated before the failure keep
 * the new state (no rollback).
 */
static int memcg_set_ksm_for_tasks(struct mem_cgroup *memcg, bool enable)
{
	struct mem_cgroup *iter;
	int ret = 0;

	for_each_mem_cgroup_tree(iter, memcg) {
		/* Already in the requested state: skip this memcg. */
		if (READ_ONCE(iter->auto_ksm_enabled) == enable)
			continue;

		ret = __memcg_set_ksm_for_tasks(iter, enable);
		if (ret) {
			/* Release iterator references before breaking out. */
			mem_cgroup_iter_break(memcg, iter);
			break;
		}

		/* Record the new state only after tasks were updated. */
		WRITE_ONCE(iter->auto_ksm_enabled, enable);
	}

	return ret;
}

static int memory_ksm_show(struct seq_file *m, void *v)
{
	unsigned long ksm_merging_pages = 0;
@@ -5833,6 +5854,7 @@ static int memory_ksm_show(struct seq_file *m, void *v)
	}
	css_task_iter_end(&it);

	seq_printf(m, "auto ksm enabled: %d\n", READ_ONCE(memcg->auto_ksm_enabled));
	seq_printf(m, "merge any tasks: %u\n", tasks);
	seq_printf(m, "ksm_rmap_items %lu\n", ksm_rmap_items);
	seq_printf(m, "ksm_merging_pages %lu\n", ksm_merging_pages);
@@ -5855,12 +5877,48 @@ static ssize_t memory_ksm_write(struct kernfs_open_file *of, char *buf,
	if (err)
		return err;

	if (READ_ONCE(memcg->auto_ksm_enabled) == enable)
		return nbytes;

	err = memcg_set_ksm_for_tasks(memcg, enable);
	if (err)
		return err;

	return nbytes;
}

/*
 * Attach-time helper: if the destination memcg has auto_ksm_enabled
 * set, enable KSM "merge any" on the mm of every task being moved in.
 * Tasks without an mm, or for which taking the mmap write lock is
 * interrupted, are silently skipped (best effort).
 */
static void memcg_attach_ksm(struct cgroup_taskset *tset)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;
	struct task_struct *task;

	cgroup_taskset_first(tset, &css);
	memcg = mem_cgroup_from_css(css);
	if (!READ_ONCE(memcg->auto_ksm_enabled))
		return;

	cgroup_taskset_for_each(task, css, tset) {
		struct mm_struct *mm = get_task_mm(task);

		/* Skip tasks with no user address space. */
		if (!mm)
			continue;

		/* Interrupted while waiting for the lock: skip this task. */
		if (mmap_write_lock_killable(mm)) {
			mmput(mm);
			continue;
		}

		ksm_enable_merge_any(mm);

		mmap_write_unlock(mm);
		mmput(mm);
	}
}
#else
/* No-op stub when KSM is compiled out (#else branch of CONFIG_KSM). */
static inline void memcg_attach_ksm(struct cgroup_taskset *tset)
{
}
#endif /* CONFIG_KSM */

#ifdef CONFIG_CGROUP_V1_WRITEBACK
@@ -6430,6 +6488,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
	}

	hugetlb_pool_inherit(memcg, parent);
#ifdef CONFIG_KSM
	memcg->auto_ksm_enabled = READ_ONCE(parent->auto_ksm_enabled);
#endif

	error = memcg_online_kmem(memcg);
	if (error)
@@ -7345,6 +7406,12 @@ static void mem_cgroup_move_charge(void)
	atomic_dec(&mc.from->moving_account);
}

/*
 * cgroup .attach callback for the memory controller: propagate the
 * memcg KSM setting to incoming tasks, but only on the legacy (v1)
 * hierarchy.
 */
static void mem_cgroup_attach(struct cgroup_taskset *tset)
{
	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	memcg_attach_ksm(tset);
}

static void mem_cgroup_move_task(void)
{
	if (mc.to) {
@@ -7360,6 +7427,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
/* Empty .attach callback for the configuration without task moving. */
static void mem_cgroup_attach(struct cgroup_taskset *tset)
{
}
/* Empty post-attach callback for the same configuration. */
static void mem_cgroup_move_task(void)
{
}
@@ -7623,6 +7693,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
	.css_rstat_flush = mem_cgroup_css_rstat_flush,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,