Commit 6f6956cf authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert __update_and_free_page() to folios

Change __update_and_free_page() to __update_and_free_hugetlb_folio() by
changing its callers to pass in a folio.

Link: https://lkml.kernel.org/r/20230113223057.173292-3-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6aa3a920
mm/hugetlb.c: +6 −6
@@ -1698,10 +1698,10 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 	enqueue_hugetlb_folio(h, folio);
 }
 
-static void __update_and_free_page(struct hstate *h, struct page *page)
+static void __update_and_free_hugetlb_folio(struct hstate *h,
+						struct folio *folio)
 {
 	int i;
-	struct folio *folio = page_folio(page);
 	struct page *subpage;
 
 	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
@@ -1714,7 +1714,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
 		return;
 
-	if (hugetlb_vmemmap_restore(h, page)) {
+	if (hugetlb_vmemmap_restore(h, &folio->page)) {
 		spin_lock_irq(&hugetlb_lock);
 		/*
 		 * If we cannot allocate vmemmap pages, just refuse to free the
@@ -1750,7 +1750,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 		destroy_compound_gigantic_folio(folio, huge_page_order(h));
 		free_gigantic_folio(folio, huge_page_order(h));
 	} else {
-		__free_pages(page, huge_page_order(h));
+		__free_pages(&folio->page, huge_page_order(h));
 	}
 }
 
@@ -1790,7 +1790,7 @@ static void free_hpage_workfn(struct work_struct *work)
 		 */
 		h = size_to_hstate(page_size(page));
 
-		__update_and_free_page(h, page);
+		__update_and_free_hugetlb_folio(h, page_folio(page));
 
 		cond_resched();
 	}
@@ -1807,7 +1807,7 @@ static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 				 bool atomic)
 {
 	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
-		__update_and_free_page(h, &folio->page);
+		__update_and_free_hugetlb_folio(h, folio);
 		return;
 	}
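
For readers following the conversion, the pattern is mechanical: a caller that still holds only a struct page wraps it with page_folio() before calling the folio-taking function, and the callee reaches back to the underlying page via &folio->page wherever a page-based helper (such as __free_pages()) remains. The standalone C sketch below models that pattern; the struct page/struct folio definitions and the page_folio() helper here are simplified stand-ins for illustration, not the kernel's real implementations.

/* Simplified model of the page -> folio conversion pattern used in this
 * commit. These types are stand-ins, not the kernel's definitions. */
#include <stdio.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* a folio embeds its head page */

/* stand-in for the kernel's page_folio(); this toy version only
 * handles a head page, unlike the real helper */
static struct folio *page_folio(struct page *page)
{
	return (struct folio *)page;
}

/* a helper that still takes a struct page, like __free_pages() */
static void legacy_page_helper(struct page *page)
{
	printf("operating on page %p\n", (void *)page);
}

/* after the conversion: the function takes a folio ... */
static void update_and_free(struct folio *folio)
{
	/* ... and uses &folio->page where a page-based API remains */
	legacy_page_helper(&folio->page);
}

int main(void)
{
	struct folio f = { .page = { .flags = 0 } };

	/* a caller holding only a page wraps it with page_folio(),
	 * as free_hpage_workfn() does in the diff above */
	update_and_free(page_folio(&f.page));
	return 0;
}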