Commit 5ee27742 authored by openeuler-ci-bot, committed by Gitee

!967 Support dynamic_hugetlb on arm64 and fix some bugs

Merge Pull Request from: @ci-robot 
 
PR sync from:  Liu Shixin <liushixin2@huawei.com>
 https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/thread/GMXFWIIVGDUCTVDK44XS2U2JPUIK4ZEN/ 
Support dynamic_hugetlb on arm64 and fix some bugs.

Liu Shixin (6):
  mm/dynamic_hugetlb: fix kabi broken when enable CONFIG_DYNAMIC_HUGETLB
    on arm64
  mm/dynamic_hugetlb: support dynamic hugetlb on arm64
  mm/dynamic_hugetlb: isolate hugepage without dissolve
  mm/dynamic_hugetlb: replace spin_lock with mutex_lock and fix kabi
    broken
  mm/dynamic_hugetlb: set PagePool to bad page
  mm/dynamic_hugetlb: fix type error of pfn in
    __hpool_split_gigantic_page()


-- 
2.25.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/967

 

Reviewed-by: Jialin Zhang <zhangjialin11@huawei.com>
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
parents 7bfe5365 c5fd2410
+1 −1
@@ -262,7 +262,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON
 
 config DYNAMIC_HUGETLB
 	bool "Dynamic HugeTLB"
-	depends on X86_64
+	depends on X86_64 || (ARM64 && ARM64_4K_PAGES)
 	depends on HUGETLBFS
 	depends on MEMCG && CGROUP_HUGETLB
 	help
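
Review note: the new dependency limits arm64 support to 4 KiB translation granules. The pool's split chain assumes the x86-64-style geometry, where a PUD maps 1 GiB and a PMD maps 2 MiB, which on arm64 only holds with 4 KiB base pages. A minimal sketch of that assumption (hypothetical check, not part of this series):

	#include <linux/build_bug.h>
	#include <linux/sizes.h>

	/* The split chain the pool manages: 1 GiB -> 2 MiB -> 4 KiB. */
	static inline void dhugetlb_geometry_check(void)
	{
		BUILD_BUG_ON(PUD_SIZE != SZ_1G);
		BUILD_BUG_ON(PMD_SIZE != SZ_2M);
		BUILD_BUG_ON(PAGE_SIZE != SZ_4K);
	}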
+1 −1
@@ -1213,7 +1213,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 	 */
 	mpol_shared_policy_init(&p->policy, NULL);
 	/* Initialize hpool here in case of a quick call to destroy */
-	link_hpool(p);
+	link_hpool(p, sbinfo->hstate);
 
 	return &p->vfs_inode;
 }
+21 −3
@@ -66,7 +66,7 @@ enum huge_pages_pool_type {
 struct dhugetlb_pool {
 	int nid;
 	spinlock_t lock;
-	spinlock_t reserved_lock;
+	KABI_DEPRECATE(spinlock_t, reserved_lock)
 	atomic_t refcnt;
 	unsigned long normal_pages_disabled;
 
@@ -74,6 +74,18 @@ struct dhugetlb_pool {
 
 	unsigned long total_huge_pages;
 	struct huge_pages_pool hpages_pool[HUGE_PAGES_POOL_MAX];
+
+	/* The dhugetlb_pool structure is only used by the core kernel. It
+	 * is accessed only by the memory cgroup and hugetlb core code, so
+	 * changes made to the dhugetlb_pool structure should not affect
+	 * third-party kernel modules.
+	 */
+	KABI_EXTEND(struct mutex reserved_lock)
+
+	/*
+	 * The percpu_pool[] should only be used by the dynamic hugetlb core.
+	 * External kernel modules should not use it.
+	 */
 	struct percpu_pages_pool percpu_pool[0];
 };
 
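Review note: converting reserved_lock from spinlock_t to struct mutex in place would change the field's size and shift every later member, breaking kabi for modules built against the old layout. The hunk above therefore retires the old slot with KABI_DEPRECATE and appends the mutex with KABI_EXTEND, which the new comment justifies by noting that only core kernel code reaches past this point in the struct. A simplified illustration of the idea (assumed expansions; the real openEuler macros differ in detail):

	/* Deprecated member keeps its storage so later offsets stay put;
	 * the extension adds storage that old modules never touch. */
	#define KABI_DEPRECATE(_type, _orig)	_type kabi_deprecated_##_orig;
	#define KABI_EXTEND(_new)		_new;
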
@@ -97,13 +109,14 @@ bool free_page_to_dhugetlb_pool(struct page *page);
 void free_page_list_to_dhugetlb_pool(struct list_head *list);
 int task_has_mem_in_hpool(struct task_struct *tsk);
 
-void link_hpool(struct hugetlbfs_inode_info *p);
+void link_hpool(struct hugetlbfs_inode_info *p, struct hstate *h);
 void unlink_hpool(struct hugetlbfs_inode_info *p);
 bool file_has_mem_in_hpool(struct hugetlbfs_inode_info *p);
 int dhugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_info *p);
 struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetlb_pool *hpool,
 						bool need_unreserved);
 void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve);
+bool page_belong_to_dynamic_hugetlb(struct page *page);
 
 #else

@@ -147,7 +160,7 @@ static inline int task_has_mem_in_hpool(struct task_struct *tsk)
 }
 
 #ifdef CONFIG_HUGETLBFS
-static inline void link_hpool(struct hugetlbfs_inode_info *p)
+static inline void link_hpool(struct hugetlbfs_inode_info *p, struct hstate *h)
 {
 }
 static inline void unlink_hpool(struct hugetlbfs_inode_info *p)
@@ -171,6 +184,11 @@ static inline
 void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
 {
 }
+static inline
+bool page_belong_to_dynamic_hugetlb(struct page *page)
+{
+	return false;
+}
 #endif
 
 #endif /* CONFIG_DYNAMIC_HUGETLB */
+5 −1
@@ -372,7 +372,7 @@ struct mem_cgroup {
 	struct deferred_split deferred_split_queue;
#endif
 
-#ifdef CONFIG_DYNAMIC_HUGETLB
+#if defined(CONFIG_DYNAMIC_HUGETLB) && defined(CONFIG_X86_64)
 	struct dhugetlb_pool *hpool;
 #endif
 #ifndef __GENKSYMS__
@@ -397,7 +397,11 @@ struct mem_cgroup {
 #else
 	KABI_RESERVE(5)
 #endif
+#if defined(CONFIG_DYNAMIC_HUGETLB) && defined(CONFIG_ARM64)
+	KABI_USE(6, struct dhugetlb_pool *hpool)
+#else
 	KABI_RESERVE(6)
+#endif
 	KABI_RESERVE(7)
 	KABI_RESERVE(8)

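Review note: struct mem_cgroup is likewise kabi-sensitive. On x86_64 the hpool pointer keeps its pre-existing slot; on arm64, where CONFIG_DYNAMIC_HUGETLB could not previously be enabled, the pointer takes over reserved slot 6 via KABI_USE, so the structure's size and member offsets are unchanged on both architectures. A simplified illustration (assumed expansions, not the actual openEuler definitions):

	/* The new member shares storage with the reserved slot it consumes. */
	#define KABI_RESERVE(n)		unsigned long kabi_reserved##n;
	#define KABI_USE(n, _new)	union { _new; unsigned long kabi_reserved##n; };
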
+45 −9
@@ -5,6 +5,7 @@
 
 #include <linux/rmap.h>
 #include <linux/migrate.h>
+#include <linux/memblock.h>
 #include <linux/memory_hotplug.h>
 #include <linux/dynamic_hugetlb.h>

@@ -54,7 +55,8 @@ static void __hpool_split_gigantic_page(struct dhugetlb_pool *hpool, struct page
 {
 	int nr_pages = 1 << (PUD_SHIFT - PAGE_SHIFT);
 	int nr_blocks = 1 << (PMD_SHIFT - PAGE_SHIFT);
-	int i, pfn = page_to_pfn(page);
+	unsigned long pfn = page_to_pfn(page);
+	int i;
 
 	lockdep_assert_held(&hpool->lock);
 	atomic_set(compound_mapcount_ptr(page), 0);
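
Review note: page_to_pfn() returns unsigned long, and truncating it to int misbehaves once physical memory reaches PFN 2^31, i.e. 8 TiB with 4 KiB pages; the split loop would then derive subpage addresses from a wrapped, negative frame number. Illustrative arithmetic (assumes PAGE_SHIFT == 12):

	unsigned long pfn = (1UL << 43) >> PAGE_SHIFT;	/* 8 TiB -> PFN 0x80000000 */
	int bad = (int)pfn;				/* wraps negative in 32 bits */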
@@ -447,6 +449,19 @@ static struct dhugetlb_pool *find_hpool_by_dhugetlb_pagelist(struct page *page)
 	return hpool;
 }
 
+bool page_belong_to_dynamic_hugetlb(struct page *page)
+{
+	struct dhugetlb_pool *hpool;
+
+	if (!dhugetlb_enabled)
+		return false;
+
+	hpool = find_hpool_by_dhugetlb_pagelist(page);
+	if (hpool)
+		return true;
+	return false;
+}
+
 static struct dhugetlb_pool *find_hpool_by_task(struct task_struct *tsk)
 {
 	struct mem_cgroup *memcg;
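
Review note: page_belong_to_dynamic_hugetlb() gives the memory-failure path a membership test for pool pages, backing the "isolate hugepage without dissolve" patch in this series. A hypothetical call-site sketch (not part of this diff):

	/* A hugepage owned by a dhugetlb pool must not be dissolved into
	 * the buddy system; its free path isolates it instead (see the
	 * PageHWPoison handling added further down). */
	if (PageHuge(page) && page_belong_to_dynamic_hugetlb(page))
		return 0;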
@@ -515,6 +530,13 @@ static struct page *__alloc_page_from_dhugetlb_pool(void)
 	spin_lock_irqsave(&percpu_pool->lock, flags);
 
 	do {
+		/*
+		 * Before discarding the bad page, set the PagePool flag to
+		 * distinguish it from a free page, and increase used_pages
+		 * to guarantee used + freed = total.
+		 */
+		if (page)
+			SetPagePool(page);
 		page = NULL;
 		if (percpu_pool->free_pages == 0) {
 			int ret;
@@ -530,8 +552,8 @@ static struct page *__alloc_page_from_dhugetlb_pool(void)
 		page = list_entry(percpu_pool->head_page.next, struct page, lru);
 		list_del(&page->lru);
 		percpu_pool->free_pages--;
-	} while (page && check_new_page(page));
 		percpu_pool->used_pages++;
+	} while (page && check_new_page(page));
 	SetPagePool(page);
 
 unlock:
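
Review note: a page that fails check_new_page() is deliberately parked rather than reused. Marking it PagePool and counting it in used_pages (now incremented inside the loop) keeps the pool's books balanced, so that used + freed still equals the total handed to the per-cpu pool. Sketch of the invariant (the total on the right-hand side is a hypothetical name, not a field in this file):

	WARN_ON(percpu_pool->used_pages + percpu_pool->free_pages !=
		total_pages_given_to_percpu_pool);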
@@ -618,14 +640,20 @@ void free_page_list_to_dhugetlb_pool(struct list_head *list)
 	}
 }
 
-void link_hpool(struct hugetlbfs_inode_info *p)
+void link_hpool(struct hugetlbfs_inode_info *p, struct hstate *h)
 {
+	unsigned long size;
+
 	if (!dhugetlb_enabled || !p)
 		return;
 
+	size = huge_page_size(h);
+	if (size == PMD_SIZE || size == PUD_SIZE) {
 		p->hpool = find_hpool_by_task(current);
 		if (!get_hpool_unless_zero(p->hpool))
			p->hpool = NULL;
+	} else
+		p->hpool = NULL;
 }
 
 void unlink_hpool(struct hugetlbfs_inode_info *p)
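
Review note: with the hstate threaded through from the hugetlbfs superblock, link_hpool() attaches a pool only for the two sizes the pool manages; any other hstate leaves p->hpool NULL and the file falls back to the regular hugetlb paths. Illustrative effect, assuming arm64 with 4 KiB granules:

	/* pagesize=2M mount  -> size == PMD_SIZE, pool attached
	 * pagesize=1G mount  -> size == PUD_SIZE, pool attached
	 * pagesize=64K mount -> neither match, p->hpool stays NULL */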
@@ -733,8 +761,15 @@ void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
 	}
 
 	spin_lock(&hpool->lock);
+	/*
+	 * memory_failure will free the hwpoison hugepage and then try to
+	 * dissolve it, freeing its subpages to the buddy system. Since a
+	 * page in a dhugetlb_pool must not be freed to the buddy system,
+	 * isolate the hugepage here directly and skip the later dissolution.
+	 */
+	if (PageHWPoison(page))
+		goto out;
 	ClearPagePool(page);
 	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
 	if (hstate_is_gigantic(h))
 		hpages_pool = &hpool->hpages_pool[HUGE_PAGES_POOL_1G];
 	else
@@ -750,6 +785,7 @@ void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
 	}
 	trace_dynamic_hugetlb_alloc_free(hpool, page, hpages_pool->free_huge_pages,
 					 DHUGETLB_FREE, huge_page_size(h));
+out:
 	spin_unlock(&hpool->lock);
 	put_hpool(hpool);
 }
@@ -859,7 +895,7 @@ static int hugetlb_pool_create(struct mem_cgroup *memcg, unsigned long nid)
 		return -ENOMEM;
 
 	spin_lock_init(&hpool->lock);
-	spin_lock_init(&hpool->reserved_lock);
+	mutex_init(&hpool->reserved_lock);
 	hpool->nid = nid;
 	atomic_set(&hpool->refcnt, 1);
 
@@ -972,7 +1008,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
 	if (!get_hpool_unless_zero(hpool))
 		return -EINVAL;
 
-	spin_lock(&hpool->reserved_lock);
+	mutex_lock(&hpool->reserved_lock);
 	spin_lock(&hpool->lock);
 	hpages_pool = &hpool->hpages_pool[hpages_pool_idx];
 	if (nr_pages > hpages_pool->nr_huge_pages) {
@@ -1008,7 +1044,7 @@ static ssize_t update_reserved_pages(struct mem_cgroup *memcg, char *buf, int hp
 		hpages_pool->free_normal_pages += delta;
 	}
 	spin_unlock(&hpool->lock);
-	spin_unlock(&hpool->reserved_lock);
+	mutex_unlock(&hpool->reserved_lock);
 	put_hpool(hpool);
 	return 0;
 }
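
Review note: this conversion is what forces the kabi changes in dynamic_hugetlb.h above. A mutex, unlike a spinlock, permits sleeping in its critical section, which suits update_reserved_pages() holding reserved_lock across the whole reservation update while the inner hpool->lock spinlock still covers only the short counter and list manipulation. The resulting nesting:

	mutex_lock(&hpool->reserved_lock);	/* sleepable, serializes updates */
	spin_lock(&hpool->lock);		/* brief, atomic section */
	/* ... adjust reserved/free counts ... */
	spin_unlock(&hpool->lock);
	/* sleeping is legal here while the mutex is still held */
	mutex_unlock(&hpool->reserved_lock);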