Commit 6ede0f00 authored by Liu Shixin's avatar Liu Shixin Committed by Zheng Zengkai
Browse files

mm/dynamic_hugetlb: free huge pages to dhugetlb_pool

hulk inclusion
category: feature
bugzilla: 46904, https://gitee.com/openeuler/kernel/issues/I4QSHG


CVE: NA

--------------------------------

Add a function to free huge pages back to the dhugetlb_pool.

Signed-off-by: default avatarLiu Shixin <liushixin2@huawei.com>
Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 39eec758
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -102,6 +102,7 @@ bool file_has_mem_in_hpool(struct hugetlbfs_inode_info *p);
int dhugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_info *p);
struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetlb_pool *hpool,
						bool need_unreserved);
void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve);

#else

@@ -162,6 +163,10 @@ struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetl
{
	return NULL;
}
/*
 * !CONFIG_DYNAMIC_HUGETLB stub: freeing to a dhugetlb pool is a no-op when
 * the feature is compiled out, so callers need no #ifdef at the call site.
 */
static inline
void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
{
}
#endif

#endif /* CONFIG_DYNAMIC_HUGETLB */
+30 −0
Original line number Diff line number Diff line
@@ -649,6 +649,36 @@ struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetl
	return page;
}

/*
 * free_huge_page_to_dhugetlb_pool - return a huge page to its dhugetlb pool.
 * @page:            the huge page being freed (presumably was allocated from a
 *                   dhugetlb pool — the pool is looked up from the page itself)
 * @restore_reserve: if true, credit the freed page back to the pool's
 *                   reserved-page count as well
 *
 * Looks up the owning pool via the dhugetlb pagelist mapping, then under
 * hpool->lock moves the page onto the pool's free list (1G or 2M sub-pool,
 * chosen by the page's hstate) and updates the free/used counters.
 *
 * NOTE(review): if the pool reference cannot be taken, the function only
 * logs an error and returns — the page is not freed anywhere else on that
 * path; presumably this cannot happen for a live pool page. Verify.
 */
void free_huge_page_to_dhugetlb_pool(struct page *page, bool restore_reserve)
{
	struct hstate *h = page_hstate(page);
	struct huge_pages_pool *hpages_pool;
	struct dhugetlb_pool *hpool;

	hpool = find_hpool_by_dhugetlb_pagelist(page);

	/* Pin the pool so it cannot be destroyed while we manipulate it. */
	if (!get_hpool_unless_zero(hpool)) {
		pr_err("dhugetlb: free error: get hpool failed\n");
		return;
	}

	spin_lock(&hpool->lock);
	/* Page stays pool-owned but is no longer "in use": drop the pool
	 * flag and detach the hugetlb compound destructor. */
	ClearPagePool(page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	/* Route the page back to the sub-pool matching its size. */
	if (hstate_is_gigantic(h))
		hpages_pool = &hpool->hpages_pool[HUGE_PAGES_POOL_1G];
	else
		hpages_pool = &hpool->hpages_pool[HUGE_PAGES_POOL_2M];

	list_add(&page->lru, &hpages_pool->hugepage_freelists);
	hpages_pool->free_huge_pages++;
	hpages_pool->used_huge_pages--;
	/* The allocation consumed a reservation; give it back. */
	if (restore_reserve)
		hpages_pool->resv_huge_pages++;
	spin_unlock(&hpool->lock);
	/* Drop the reference taken above; may release the pool. */
	put_hpool(hpool);
}

static int alloc_hugepage_from_hugetlb(struct dhugetlb_pool *hpool,
				       unsigned long nid, unsigned long nr_pages)
{
+13 −0
Original line number Diff line number Diff line
@@ -1583,6 +1583,19 @@ void free_huge_page(struct page *page)
	restore_reserve = HPageRestoreReserve(page);
	ClearHPageRestoreReserve(page);

	if (dhugetlb_enabled && PagePool(page)) {
		spin_lock(&hugetlb_lock);
		ClearHPageMigratable(page);
		list_del(&page->lru);
		hugetlb_cgroup_uncharge_page(hstate_index(h),
					     pages_per_huge_page(h), page);
		hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
						  pages_per_huge_page(h), page);
		spin_unlock(&hugetlb_lock);
		free_huge_page_to_dhugetlb_pool(page, restore_reserve);
		return;
	}

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there