Commit d6ef19e2 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert update_and_free_page() to folios

Make more progress on converting the free_huge_page() destructor to
operate on folios by converting update_and_free_page() to folios.
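
The pattern is the same at each call site: a caller that still holds a
struct page converts once with page_folio() and then passes the folio down.
Below is a rough, self-contained userspace sketch of that calling-convention
change; the struct definitions, page_folio() body, and the free_page_model()/
free_folio_model() helpers are simplified illustrative stand-ins, not the
kernel's real types or functions.

  #include <stdbool.h>
  #include <stdio.h>

  /* Simplified stand-ins for the kernel types, for illustration only. */
  struct page { unsigned long flags; void *mapping; };
  struct folio { struct page page; };	/* a folio embeds its head page */

  /* Model of page_folio(): the head page and its folio share an address. */
  static struct folio *page_folio(struct page *page)
  {
  	return (struct folio *)page;
  }

  /* Old convention: the helper takes a raw struct page. */
  static void free_page_model(struct page *page, bool atomic)
  {
  	printf("free page %p, atomic=%d\n", (void *)page, atomic);
  }

  /* New convention: the helper takes the folio, as
   * update_and_free_hugetlb_folio() now does. */
  static void free_folio_model(struct folio *folio, bool atomic)
  {
  	printf("free folio %p, atomic=%d\n", (void *)folio, atomic);
  }

  int main(void)
  {
  	struct page p = { 0 };
  	/* Callers that still hold a struct page convert at the boundary. */
  	struct folio *folio = page_folio(&p);

  	free_page_model(&p, false);	/* before the conversion */
  	free_folio_model(folio, false);	/* after the conversion  */
  	return 0;
  }

Converting at the boundary with page_folio() keeps each call-site change
local until the surrounding function is itself converted to take a folio.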

Link: https://lkml.kernel.org/r/20221129225039.82257-6-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent cfd5082b
+16 −14
@@ -1478,7 +1478,7 @@ static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
 	 * apply.
 	 *
 	 * This handles the case where more than one ref is held when and
-	 * after update_and_free_page is called.
+	 * after update_and_free_hugetlb_folio is called.
 	 *
 	 * In the case of demote we do not ref count the page as it will soon
 	 * be turned into a page of smaller size.
@@ -1609,7 +1609,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
 }
 
 /*
- * As update_and_free_page() can be called under any context, so we cannot
+ * As update_and_free_hugetlb_folio() can be called under any context, so we cannot
  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
  * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
  * the vmemmap pages.
@@ -1657,11 +1657,11 @@ static inline void flush_free_hpage_work(struct hstate *h)
 		flush_work(&free_hpage_work);
 }
 
-static void update_and_free_page(struct hstate *h, struct page *page,
+static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
 				 bool atomic)
 {
-	if (!HPageVmemmapOptimized(page) || !atomic) {
-		__update_and_free_page(h, page);
+	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
+		__update_and_free_page(h, &folio->page);
 		return;
 	}

@@ -1672,16 +1672,18 @@ static void update_and_free_page(struct hstate *h, struct page *page,
 	 * empty. Otherwise, schedule_work() had been called but the workfn
 	 * hasn't retrieved the list yet.
 	 */
-	if (llist_add((struct llist_node *)&page->mapping, &hpage_freelist))
+	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
 		schedule_work(&free_hpage_work);
 }
 
 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list)
 {
 	struct page *page, *t_page;
+	struct folio *folio;
 
 	list_for_each_entry_safe(page, t_page, list, lru) {
-		update_and_free_page(h, page, false);
+		folio = page_folio(page);
+		update_and_free_hugetlb_folio(h, folio, false);
 		cond_resched();
 	}
 }
@@ -1751,12 +1753,12 @@ void free_huge_page(struct page *page)
 	if (folio_test_hugetlb_temporary(folio)) {
 		remove_hugetlb_folio(h, folio, false);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page, true);
+		update_and_free_hugetlb_folio(h, folio, true);
 	} else if (h->surplus_huge_pages_node[nid]) {
 		/* remove the page from active list */
 		remove_hugetlb_folio(h, folio, true);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
-		update_and_free_page(h, page, true);
+		update_and_free_hugetlb_folio(h, folio, true);
 	} else {
 		arch_clear_hugepage_flags(page);
 		enqueue_huge_page(h, page);
@@ -2172,8 +2174,8 @@ int dissolve_free_huge_page(struct page *page)
 		spin_unlock_irq(&hugetlb_lock);
 
 		/*
-		 * Normally update_and_free_page will allocate required vmemmmap
-		 * before freeing the page.  update_and_free_page will fail to
+		 * Normally update_and_free_hugtlb_folio will allocate required vmemmmap
+		 * before freeing the page.  update_and_free_hugtlb_folio will fail to
 		 * free the page if it can not allocate required vmemmap.  We
 		 * need to adjust max_huge_pages if the page is not freed.
 		 * Attempt to allocate vmemmmap here so that we can take
@@ -2181,7 +2183,7 @@ int dissolve_free_huge_page(struct page *page)
 		 */
 		rc = hugetlb_vmemmap_restore(h, &folio->page);
 		if (!rc) {
-			update_and_free_page(h, &folio->page, false);
+			update_and_free_hugetlb_folio(h, folio, false);
 		} else {
 			spin_lock_irq(&hugetlb_lock);
 			add_hugetlb_page(h, &folio->page, false);
@@ -2818,7 +2820,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * Pages have been replaced, we can safely free the old one.
 		 */
 		spin_unlock_irq(&hugetlb_lock);
-		update_and_free_page(h, old_page, false);
+		update_and_free_hugetlb_folio(h, old_folio, false);
 	}
 
 	return ret;
@@ -2827,7 +2829,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 	spin_unlock_irq(&hugetlb_lock);
 	/* Page has a zero ref count, but needs a ref to be freed */
 	folio_ref_unfreeze(new_folio, 1);
-	update_and_free_page(h, new_page, false);
+	update_and_free_hugetlb_folio(h, new_folio, false);
 
 	return ret;
 }