Commit 7f325a8d authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert free_gigantic_page() to folios

Convert callers of free_gigantic_page() to use folios; the function is then
renamed to free_gigantic_folio().
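
The caller-side pattern, condensed from the hunks below (surrounding kernel
context trimmed), is roughly:

	/* before */
	free_gigantic_page(page, huge_page_order(h));

	/* after: look up the folio once, then pass it down */
	struct folio *folio = page_folio(page);
	free_gigantic_folio(folio, huge_page_order(h));

Inside the helper itself, page_to_nid()/page_to_pfn() become
folio_nid()/folio_pfn(), and cma_release() is handed &folio->page.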

Link: https://lkml.kernel.org/r/20221129225039.82257-9-sidhartha.kumar@oracle.com


Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 240d67a8
mm/hugetlb.c: +17 −12
@@ -1361,18 +1361,20 @@ static void destroy_compound_gigantic_folio(struct folio *folio,
	__destroy_compound_gigantic_folio(folio, order, false);
}

-static void free_gigantic_page(struct page *page, unsigned int order)
+static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
-	if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
+	int nid = folio_nid(folio);
+
+	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

-	free_contig_range(page_to_pfn(page), 1 << order);
+	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
@@ -1426,7 +1428,8 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
{
	return NULL;
}
-static inline void free_gigantic_page(struct page *page, unsigned int order) { }
+static inline void free_gigantic_folio(struct folio *folio,
+						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif
@@ -1565,7 +1568,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
-	if (HPageRawHwpUnreliable(page))
+	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	if (hugetlb_vmemmap_restore(h, page)) {
@@ -1575,7 +1578,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
-		add_hugetlb_folio(h, page_folio(page), true);
+		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}
@@ -1588,7 +1591,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
		hugetlb_clear_page_hwpoison(&folio->page);

	for (i = 0; i < pages_per_huge_page(h); i++) {
-		subpage = nth_page(page, i);
+		subpage = folio_page(folio, i);
		subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
@@ -1597,12 +1600,12 @@ static void __update_and_free_page(struct hstate *h, struct page *page)

	/*
	 * Non-gigantic pages demoted from CMA allocated gigantic pages
-	 * need to be given back to CMA in free_gigantic_page.
+	 * need to be given back to CMA in free_gigantic_folio.
	 */
	if (hstate_is_gigantic(h) ||
	    hugetlb_cma_folio(folio, huge_page_order(h))) {
		destroy_compound_gigantic_folio(folio, huge_page_order(h));
-		free_gigantic_page(page, huge_page_order(h));
+		free_gigantic_folio(folio, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
@@ -2025,6 +2028,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;
+	struct folio *folio;
	bool retry = false;

retry:
@@ -2035,14 +2039,14 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
				nid, nmask, node_alloc_noretry);
	if (!page)
	return NULL;
-
+	folio = page_folio(page);
	if (hstate_is_gigantic(h)) {
		if (!prep_compound_gigantic_page(page, huge_page_order(h))) {
			/*
			 * Rare failure to convert pages to compound page.
			 * Free pages and try again - ONCE!
			 */
-			free_gigantic_page(page, huge_page_order(h));
+			free_gigantic_folio(folio, huge_page_order(h));
			if (!retry) {
				retry = true;
				goto retry;
@@ -3050,6 +3054,7 @@ static void __init gather_bootmem_prealloc(void)

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
+		struct folio *folio = page_folio(page);
		struct hstate *h = m->hstate;

		VM_BUG_ON(!hstate_is_gigantic(h));
@@ -3060,7 +3065,7 @@ static void __init gather_bootmem_prealloc(void)
			free_huge_page(page); /* add to the hugepage allocator */
		} else {
			/* VERY unlikely inflated ref count on a tail page */
-			free_gigantic_page(page, huge_page_order(h));
+			free_gigantic_folio(folio, huge_page_order(h));
		}

		/*