Commit d1c60955 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert hugetlb prep functions to folios

Convert prep_new_huge_page() and __prep_compound_gigantic_page() to
folios.

Link: https://lkml.kernel.org/r/20221129225039.82257-10-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7f325a8d
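The whole conversion leans on the fact that a struct folio is laid out as a typed view of a compound page's head page, so page_folio() and &folio->page translate between the two representations without touching memory. Below is a minimal userspace model of that relationship, assuming simplified struct definitions (the real ones live in include/linux/mm_types.h; the *_model names are hypothetical stand-ins, not kernel API):

#include <assert.h>

struct page { unsigned long flags; };	/* stand-in for the real struct page */
struct folio { struct page page; };	/* a folio embeds its head page */

/* model of page_folio(): a head page and its folio share an address */
static struct folio *page_folio_model(struct page *head)
{
	return (struct folio *)head;
}

/* model of folio_page(): index off the head page, like nth_page() */
static struct page *folio_page_model(struct folio *folio, unsigned long n)
{
	return &folio->page + n;	/* valid here: the folio aliases pages[0] */
}

int main(void)
{
	struct page pages[8] = {{ 0 }};	/* pretend order-3 compound page */
	struct folio *folio = page_folio_model(&pages[0]);

	/* the conversions in this patch are type changes, not layout changes */
	assert((void *)folio == (void *)&pages[0]);
	assert(folio_page_model(folio, 5) == &pages[5]);
	return 0;
}

With that equivalence in mind, most hunks below are mechanical: a struct page *page parameter becomes struct folio *folio, nth_page(page, i) becomes folio_page(folio, i), and head-page flag helpers pick up their __folio_* spellings.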
mm/hugetlb.c: +30 −33

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1789,29 +1789,27 @@ static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
 	set_hugetlb_cgroup_rsvd(folio, NULL);
 }
 
-static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
 {
-	struct folio *folio = page_folio(page);
-
 	__prep_new_hugetlb_folio(h, folio);
 	spin_lock_irq(&hugetlb_lock);
 	__prep_account_new_huge_page(h, nid);
 	spin_unlock_irq(&hugetlb_lock);
 }
 
-static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
-								bool demote)
+static bool __prep_compound_gigantic_folio(struct folio *folio,
+					unsigned int order, bool demote)
 {
 	int i, j;
 	int nr_pages = 1 << order;
 	struct page *p;
 
-	/* we rely on prep_new_huge_page to set the destructor */
-	set_compound_order(page, order);
-	__ClearPageReserved(page);
-	__SetPageHead(page);
+	/* we rely on prep_new_hugetlb_folio to set the destructor */
+	folio_set_compound_order(folio, order);
+	__folio_clear_reserved(folio);
+	__folio_set_head(folio);
 	for (i = 0; i < nr_pages; i++) {
-		p = nth_page(page, i);
+		p = folio_page(folio, i);
 
 		/*
 		 * For gigantic hugepages allocated through bootmem at
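The p = folio_page(folio, i) replacement above is a direct substitution rather than a behavioural change: in this era of the kernel the helper is a thin macro over nth_page(). Roughly, from include/linux/page-flags.h:

#define folio_page(folio, n)	nth_page(&(folio)->page, n)

so the loop still walks the same struct page array; it merely starts from the folio's embedded head page.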
@@ -1853,43 +1851,41 @@ static bool __prep_compound_gigantic_page(struct page *page, unsigned int order,
 			VM_BUG_ON_PAGE(page_count(p), p);
 		}
 		if (i != 0)
-			set_compound_head(p, page);
+			set_compound_head(p, &folio->page);
 	}
-	atomic_set(compound_mapcount_ptr(page), -1);
-	atomic_set(subpages_mapcount_ptr(page), 0);
-	atomic_set(compound_pincount_ptr(page), 0);
+	atomic_set(folio_mapcount_ptr(folio), -1);
+	atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+	atomic_set(folio_pincount_ptr(folio), 0);
 	return true;
 
 out_error:
 	/* undo page modifications made above */
 	for (j = 0; j < i; j++) {
-		p = nth_page(page, j);
+		p = folio_page(folio, j);
 		if (j != 0)
 			clear_compound_head(p);
 		set_page_refcounted(p);
 	}
 	/* need to clear PG_reserved on remaining tail pages  */
 	for (; j < nr_pages; j++) {
-		p = nth_page(page, j);
+		p = folio_page(folio, j);
 		__ClearPageReserved(p);
 	}
-	set_compound_order(page, 0);
-#ifdef CONFIG_64BIT
-	page[1].compound_nr = 0;
-#endif
-	__ClearPageHead(page);
+	folio_set_compound_order(folio, 0);
+	__folio_clear_head(folio);
 	return false;
 }
 
-static bool prep_compound_gigantic_page(struct page *page, unsigned int order)
+static bool prep_compound_gigantic_folio(struct folio *folio,
+							unsigned int order)
 {
-	return __prep_compound_gigantic_page(page, order, false);
+	return __prep_compound_gigantic_folio(folio, order, false);
 }
 
-static bool prep_compound_gigantic_page_for_demote(struct page *page,
+static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 							unsigned int order)
 {
-	return __prep_compound_gigantic_page(page, order, true);
+	return __prep_compound_gigantic_folio(folio, order, true);
 }
 
 /*
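One genuinely non-mechanical change hides in the error path above: the open-coded #ifdef CONFIG_64BIT / page[1].compound_nr = 0 block disappears because folio_set_compound_order(), introduced earlier in this series, already resets the cached subpage count when the order drops to zero. The helper looks roughly like:

static inline void folio_set_compound_order(struct folio *folio,
		unsigned int order)
{
	folio->_folio_order = order;
#ifdef CONFIG_64BIT
	folio->_folio_nr_pages = order ? 1U << order : 0;
#endif
}

so folio_set_compound_order(folio, 0) clears both _folio_order and, on 64-bit, _folio_nr_pages, the field that page[1].compound_nr aliases.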
@@ -2041,7 +2037,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 		return NULL;
 	folio = page_folio(page);
 	if (hstate_is_gigantic(h)) {
-		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
+		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			/*
			 * Rare failure to convert pages to compound page.
			 * Free pages and try again - ONCE!
@@ -2054,7 +2050,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
 			return NULL;
 		}
 	}
-	prep_new_huge_page(h, page, page_to_nid(page));
+	prep_new_hugetlb_folio(h, folio, folio_nid(folio));
 
 	return page;
 }
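The page_to_nid(page) → folio_nid(folio) switch in the caller follows the same pattern: folio accessors at this point are thin wrappers that forward to the head page, which is why alloc_fresh_huge_page() can convert once with page_folio() and then stay in folio space. Approximately, from include/linux/mm.h:

static inline int folio_nid(const struct folio *folio)
{
	return page_to_nid(&folio->page);
}

The folio_ref_count() and folio_test_reserved() checks in the gather_bootmem_prealloc() hunk below are equivalent head-page forwarders.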
@@ -3058,10 +3054,10 @@ static void __init gather_bootmem_prealloc(void)
 		struct hstate *h = m->hstate;
 
 		VM_BUG_ON(!hstate_is_gigantic(h));
-		WARN_ON(page_count(page) != 1);
-		if (prep_compound_gigantic_page(page, huge_page_order(h))) {
-			WARN_ON(PageReserved(page));
-			prep_new_huge_page(h, page, page_to_nid(page));
+		WARN_ON(folio_ref_count(folio) != 1);
+		if (prep_compound_gigantic_folio(folio, huge_page_order(h))) {
+			WARN_ON(folio_test_reserved(folio));
+			prep_new_hugetlb_folio(h, folio, folio_nid(folio));
 			free_huge_page(page); /* add to the hugepage allocator */
 		} else {
 			/* VERY unlikely inflated ref count on a tail page */
@@ -3480,13 +3476,14 @@ static int demote_free_huge_page(struct hstate *h, struct page *page)
 	for (i = 0; i < pages_per_huge_page(h);
 				i += pages_per_huge_page(target_hstate)) {
 		subpage = nth_page(page, i);
+		folio = page_folio(subpage);
 		if (hstate_is_gigantic(target_hstate))
-			prep_compound_gigantic_page_for_demote(subpage,
+			prep_compound_gigantic_folio_for_demote(folio,
							target_hstate->order);
 		else
			prep_compound_page(subpage, target_hstate->order);
 		set_page_private(subpage, 0);
-		prep_new_huge_page(target_hstate, subpage, nid);
+		prep_new_hugetlb_folio(target_hstate, folio, nid);
 		free_huge_page(subpage);
 	}
 	mutex_unlock(&target_hstate->resize_lock);
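The demote path is the one place where a single source page yields many folios: each target-sized chunk of the gigantic page becomes its own folio before being prepped and handed back to the allocator. A condensed userspace sketch of the loop's shape, with kernel details elided (region, nr_src, and nr_dst are stand-ins for the page array and the two hstates' pages_per_huge_page() values):

#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };

int main(void)
{
	struct page region[16];			/* model: one "gigantic" page */
	unsigned int nr_src = 16, nr_dst = 4;	/* demote 16 pages into 4x4 */

	for (unsigned int i = 0; i < nr_src; i += nr_dst) {
		/* like folio = page_folio(nth_page(page, i)) in the real loop */
		struct folio *folio = (struct folio *)&region[i];

		printf("new folio head at index %u (%p)\n", i, (void *)folio);
		/* the real code then calls prep_compound_gigantic_folio_for_demote()
		 * or prep_compound_page(), prep_new_hugetlb_folio(), and
		 * free_huge_page() on each chunk */
	}
	return 0;
}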