Commit 39eec758 authored by Liu Shixin's avatar Liu Shixin Committed by Zheng Zengkai
Browse files

mm/dynamic_hugetlb: alloc huge pages from dhugetlb_pool

hulk inclusion
category: feature
bugzilla: 46904, https://gitee.com/openeuler/kernel/issues/I4QSHG


CVE: NA

--------------------------------

Add a function to allocate huge pages from a dhugetlb_pool.
When a process is bound to a mem_cgroup configured with a dhugetlb_pool,
it is only allowed to allocate huge pages from that dhugetlb_pool. If the
dhugetlb_pool has no free huge pages, mmap() will fail due to the reserve
count introduced in the previous patch.

Signed-off-by: default avatarLiu Shixin <liushixin2@huawei.com>
Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parent 5993c1d6
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -100,6 +100,8 @@ void link_hpool(struct hugetlbfs_inode_info *p);
void unlink_hpool(struct hugetlbfs_inode_info *p);
bool file_has_mem_in_hpool(struct hugetlbfs_inode_info *p);
int dhugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_info *p);
struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetlb_pool *hpool,
						bool need_unreserved);

#else

@@ -154,6 +156,12 @@ static inline int dhugetlb_acct_memory(struct hstate *h, long delta, struct huge
{
	return 0;
}
/*
 * Stub for !CONFIG_DYNAMIC_HUGETLB: dynamic hugetlb pools do not exist,
 * so there is never a page to hand out.  Returning NULL lets callers fall
 * back to the regular hugetlb allocation path.
 */
static inline
struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetlb_pool *hpool,
						bool need_unreserved)
{
	return NULL;
}
#endif

#endif /* CONFIG_DYNAMIC_HUGETLB */
+38 −1
Original line number Diff line number Diff line
@@ -103,7 +103,7 @@ static int hpool_split_page(struct dhugetlb_pool *hpool, int hpages_pool_idx)
	if (!split_page)
		return -ENOMEM;

	page = list_entry(hpages_pool->hugepage_freelists.next, struct page, lru);
	page = list_entry(hpages_pool->hugepage_freelists.prev, struct page, lru);
	list_del(&page->lru);
	hpages_pool->free_normal_pages--;

@@ -612,6 +612,43 @@ int dhugetlb_acct_memory(struct hstate *h, long delta, struct hugetlbfs_inode_in
	return ret;
}

/*
 * Take one huge page off the free list of @hpool.
 *
 * @h:               hstate selecting the page size (gigantic -> 1G pool,
 *                   otherwise the 2M pool).
 * @hpool:           dynamic hugetlb pool to allocate from.
 * @need_unreserved: when true, the page consumes a reservation, so mark it
 *                   with HPageRestoreReserve and drop resv_huge_pages.
 *
 * Returns the prepared page, or NULL if dhugetlb is disabled or the pool
 * has no free huge pages.  All pool accounting is done under hpool->lock.
 */
struct page *alloc_huge_page_from_dhugetlb_pool(struct hstate *h, struct dhugetlb_pool *hpool,
						bool need_unreserved)
{
	struct huge_pages_pool *hpages_pool;
	struct page *page = NULL;
	unsigned long flags;
	int pool_idx;

	if (!dhugetlb_enabled)
		return NULL;

	spin_lock_irqsave(&hpool->lock, flags);

	/* Pick the sub-pool matching the requested huge page size. */
	pool_idx = hstate_is_gigantic(h) ? HUGE_PAGES_POOL_1G : HUGE_PAGES_POOL_2M;
	hpages_pool = &hpool->hpages_pool[pool_idx];

	if (hpages_pool->free_huge_pages) {
		page = list_entry(hpages_pool->hugepage_freelists.next,
				  struct page, lru);
		list_del(&page->lru);
		hpages_pool->free_huge_pages--;
		hpages_pool->used_huge_pages++;
		if (need_unreserved) {
			/* Allocation is backed by a reservation. */
			SetHPageRestoreReserve(page);
			hpages_pool->resv_huge_pages--;
		}

		/* Prepare the page for use as an active huge page. */
		INIT_LIST_HEAD(&page->lru);
		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
		set_page_refcounted(page);
		SetPagePool(page);
	}

	spin_unlock_irqrestore(&hpool->lock, flags);

	return page;
}

static int alloc_hugepage_from_hugetlb(struct dhugetlb_pool *hpool,
				       unsigned long nid, unsigned long nr_pages)
{
+14 −0
Original line number Diff line number Diff line
@@ -2534,6 +2534,19 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
	if (ret)
		goto out_uncharge_cgroup_reservation;

	if (file_has_mem_in_hpool(info)) {
		bool need_unreserved = false;

		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg))
			need_unreserved = true;
		page = alloc_huge_page_from_dhugetlb_pool(h, info->hpool, need_unreserved);
		if (!page)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		list_add(&page->lru, &h->hugepage_activelist);
		goto out;
	}

	spin_lock_irq(&hugetlb_lock);
	/*
	 * glb_chg is passed to indicate whether or not a page must be taken
@@ -2554,6 +2567,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
		list_add(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
out:
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.