Commit e300ae71 authored by Yuchen Tang
Browse files

etmem: add original kernel swap enabled options

euleros inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I4QVXW


CVE: NA

-------------------------------------------------

etmem, the memory vertical expansion technology,
uses DRAM and high-performance storage new media to form multi-level
memory storage.
By grading the stored data, etmem migrates the classified cold
data from high-performance memory media to lower-cost storage
media,
so as to achieve the purpose of memory capacity expansion and
memory cost reduction.

When the memory expansion function etmem is running, the native
swap function of the kernel needs to be disabled in certain
scenarios to avoid the impact of kernel swap.

This feature provides the preceding functions.

The /sys/kernel/mm/swap/ directory provides the kernel_swap_enable
sys interface to enable or disable the native swap function
of the kernel.

The default value of /sys/kernel/mm/swap/kernel_swap_enable is true,
that is, kernel swap is enabled by default.

Turn on kernel swap:
	echo true > /sys/kernel/mm/swap/kernel_swap_enable

Turn off kernel swap:
	echo false > /sys/kernel/mm/swap/kernel_swap_enable

Signed-off-by: liubo <liubo254@huawei.com>
Signed-off-by: Yuchen Tang <tangyuchen5@huawei.com>
parent 16e5bb34
Loading
Loading
Loading
Loading
+7 −0
Original line number Diff line number Diff line
@@ -25,6 +25,8 @@ static inline struct kvm *mm_kvm(struct mm_struct *mm)
extern int add_page_for_swap(struct page *page, struct list_head *pagelist);
extern struct page *get_page_from_vaddr(struct mm_struct *mm,
					unsigned long vaddr);
extern struct kobj_attribute kernel_swap_enable_attr;
extern bool kernel_swap_enabled(void);
#else /* !CONFIG_ETMEM */
static inline int add_page_for_swap(struct page *page, struct list_head *pagelist)
{
@@ -36,5 +38,10 @@ static inline struct page *get_page_from_vaddr(struct mm_struct *mm,
{
	return NULL;
}

/*
 * Stub for !CONFIG_ETMEM builds: the native kernel swap path is
 * always treated as enabled when etmem is compiled out.
 */
static inline bool kernel_swap_enabled(void)
{
	return true;
}
#endif /* #ifdef CONFIG_ETMEM */
#endif /* define __MM_ETMEM_H_ */
+31 −0
Original line number Diff line number Diff line
@@ -9,6 +9,37 @@
#include <linux/etmem.h>
#include "internal.h"

/*
 * Runtime switch for the kernel's native swap path; defaults to enabled.
 * Toggled from userspace via /sys/kernel/mm/swap/kernel_swap_enable
 * ("true"/"1" to enable, "false"/"0" to disable).
 */
static bool enable_kernel_swap __read_mostly = true;

/* Report whether the native kernel swap path is currently enabled. */
bool kernel_swap_enabled(void)
{
	/* READ_ONCE() pairs with WRITE_ONCE() in the sysfs store handler. */
	return READ_ONCE(enable_kernel_swap);
}

/*
 * sysfs show handler for /sys/kernel/mm/swap/kernel_swap_enable.
 * Emits "true\n" or "false\n".
 *
 * Read the flag with READ_ONCE() so the access is marked, pairing with
 * the WRITE_ONCE() in the store handler and matching kernel_swap_enabled();
 * the original read was plain and inconsistent with the rest of the file.
 */
static ssize_t kernel_swap_enable_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
		       READ_ONCE(enable_kernel_swap) ? "true" : "false");
}

/*
 * Return true if @buf holds exactly @tok, optionally followed by a single
 * trailing newline (as produced by "echo").  The previous bare strncmp()
 * checks accepted any input with a matching prefix — e.g. "10" enabled
 * swap because it matched "1", and "truex" matched "true".
 */
static bool swap_enable_token_match(const char *buf, const char *tok)
{
	size_t len = strlen(tok);

	if (strncmp(buf, tok, len) != 0)
		return false;
	buf += len;
	return buf[0] == '\0' || (buf[0] == '\n' && buf[1] == '\0');
}

/*
 * sysfs store handler for /sys/kernel/mm/swap/kernel_swap_enable.
 * Accepts "true"/"1" to enable and "false"/"0" to disable the native
 * kernel swap path; any other input is rejected with -EINVAL.
 */
static ssize_t kernel_swap_enable_store(struct kobject *kobj,
					struct kobj_attribute *attr,
					const char *buf, size_t count)
{
	if (swap_enable_token_match(buf, "true") ||
	    swap_enable_token_match(buf, "1"))
		WRITE_ONCE(enable_kernel_swap, true);
	else if (swap_enable_token_match(buf, "false") ||
		 swap_enable_token_match(buf, "0"))
		WRITE_ONCE(enable_kernel_swap, false);
	else
		return -EINVAL;

	return count;
}

/*
 * sysfs attribute backing /sys/kernel/mm/swap/kernel_swap_enable
 * (mode 0644: world-readable, root-writable); hooked into swap_attrs[]
 * in swap_state.c under CONFIG_ETMEM.
 */
struct kobj_attribute kernel_swap_enable_attr =
	__ATTR(kernel_swap_enable, 0644, kernel_swap_enable_show,
		kernel_swap_enable_store);

int add_page_for_swap(struct page *page, struct list_head *pagelist)
{
	int err = -EBUSY;
+4 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include <linux/etmem.h>
#include "internal.h"
#include "swap.h"

@@ -881,6 +882,9 @@ static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
#ifdef CONFIG_ETMEM
	/* etmem: runtime on/off switch for the native kernel swap path. */
	&kernel_swap_enable_attr.attr,
#endif
	NULL,
};

+7 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <linux/etmem.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -3045,6 +3046,9 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
		goto out;
	}

	if (sc->may_swap && !kernel_swap_enabled())
		sc->may_swap = 0;

	/* If we have no swap space, do not bother scanning anon folios. */
	if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
		scan_balance = SCAN_FILE;
@@ -3312,6 +3316,9 @@ static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc)
	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	if (sc->may_swap && !kernel_swap_enabled())
		return 0;

	if (!sc->may_swap)
		return 0;