Commit 19fc1a7e authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: change hugetlb allocation functions to return a folio

Many hugetlb allocation helper functions have now been converted to
folios; update their higher-level callers to be compatible with folios.
alloc_pool_huge_page is reorganized to avoid a smatch warning reporting
that the folio variable is uninitialized.
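
The reorganized loop (quoted from the hunk below) returns from inside the
loop as soon as a folio has been allocated, so folio is never read on the
path where no allocation succeeded:

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio) {
			free_huge_page(&folio->page); /* free it into the hugepage allocator */
			return 1;
		}
	}

	return 0;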

[sidhartha.kumar@oracle.com: update alloc_and_dissolve_hugetlb_folio comments]
  Link: https://lkml.kernel.org/r/20221206233512.146535-1-sidhartha.kumar@oracle.com
Link: https://lkml.kernel.org/r/20221129225039.82257-11-sidhartha.kumar@oracle.com


Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reported-by: Wei Chen <harperchen1110@gmail.com>
Suggested-by: John Hubbard <jhubbard@nvidia.com>
Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d1c60955
mm/hugetlb.c +64 −70
@@ -1378,23 +1378,23 @@ static void free_gigantic_folio(struct folio *folio, unsigned int order)
}

#ifdef CONFIG_CONTIG_ALLOC
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		struct page *page;
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page;
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
@@ -1405,17 +1405,18 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page;
					return page_folio(page);
			}
		}
	}
#endif

	return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
@@ -1423,7 +1424,7 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
@@ -1950,7 +1951,7 @@ pgoff_t hugetlb_basepage_index(struct page *page)
	return (index << compound_order(page_head)) + compound_idx;
}

static struct page *alloc_buddy_huge_page(struct hstate *h,
static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
@@ -1988,11 +1989,6 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
		page = NULL;
	}

	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	/*
	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
	 * indicates an overall state change.  Clear bit so that we resume
@@ -2009,7 +2005,13 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
	if (node_alloc_noretry && !page && alloc_try_hard)
		node_set(nid, *node_alloc_noretry);

	return page;
	if (!page) {
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
		return NULL;
	}

	__count_vm_event(HTLB_BUDDY_PGALLOC);
	return page_folio(page);
}

/*
@@ -2019,23 +2021,21 @@ static struct page *alloc_buddy_huge_page(struct hstate *h,
 * Note that returned page is 'frozen':  ref count of head page and all tail
 * pages is zero.
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	struct page *page;
	struct folio *folio;
	bool retry = false;

retry:
	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
		folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
		folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
				nid, nmask, node_alloc_noretry);
	if (!page)
	if (!folio)
		return NULL;
	folio = page_folio(page);
	if (hstate_is_gigantic(h)) {
		if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
			/*
@@ -2052,7 +2052,7 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
	}
	prep_new_hugetlb_folio(h, folio, folio_nid(folio));

	return page;
	return folio;
}

/*
@@ -2062,23 +2062,20 @@ static struct page *alloc_fresh_huge_page(struct hstate *h,
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
				nodemask_t *node_alloc_noretry)
{
	struct page *page;
	struct folio *folio;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
						node_alloc_noretry);
		if (page)
			break;
		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio) {
			free_huge_page(&folio->page); /* free it into the hugepage allocator */
			return 1;
		}
	}

	if (!page)
	return 0;

	free_huge_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
@@ -2237,7 +2234,7 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
						int nid, nodemask_t *nmask)
{
	struct page *page = NULL;
	struct folio *folio = NULL;

	if (hstate_is_gigantic(h))
		return NULL;
@@ -2247,8 +2244,8 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
@@ -2260,43 +2257,42 @@ static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetHPageTemporary(page);
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_page(page);
		free_huge_page(&folio->page);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[page_to_nid(page)]++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return page;
	return &folio->page;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct page *page;
	struct folio *folio;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
	if (!page)
	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
	if (!folio)
		return NULL;

	/* fresh huge pages are frozen */
	set_page_refcounted(page);

	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	SetHPageTemporary(page);
	folio_set_hugetlb_temporary(folio);

	return page;
	return &folio->page;
}

/*
@@ -2745,54 +2741,52 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
}

/*
 * alloc_and_dissolve_huge_page - Allocate a new page and dissolve the old one
 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
 * the old one
 * @h: struct hstate old page belongs to
 * @old_page: Old page to dissolve
 * @old_folio: Old folio to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise negated error.
 */
static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
					struct list_head *list)
static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
			struct folio *old_folio, struct list_head *list)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	struct folio *old_folio = page_folio(old_page);
	int nid = folio_nid(old_folio);
	struct page *new_page;
	struct folio *new_folio;
	int ret = 0;

	/*
	 * Before dissolving the page, we need to allocate a new one for the
	 * pool to remain stable.  Here, we allocate the page and 'prep' it
	 * Before dissolving the folio, we need to allocate a new one for the
	 * pool to remain stable.  Here, we allocate the folio and 'prep' it
	 * by doing everything but actually updating counters and adding to
	 * the pool.  This simplifies and let us do most of the processing
	 * under the lock.
	 */
	new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL);
	if (!new_page)
	new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
	if (!new_folio)
		return -ENOMEM;
	new_folio = page_folio(new_page);
	__prep_new_hugetlb_folio(h, new_folio);

retry:
	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(old_folio)) {
		/*
		 * Freed from under us. Drop new_page too.
		 * Freed from under us. Drop new_folio too.
		 */
		goto free_new;
	} else if (folio_ref_count(old_folio)) {
		/*
		 * Someone has grabbed the page, try to isolate it here.
		 * Someone has grabbed the folio, try to isolate it here.
		 * Fail with -EBUSY if not possible.
		 */
		spin_unlock_irq(&hugetlb_lock);
		ret = isolate_hugetlb(old_page, list);
		ret = isolate_hugetlb(&old_folio->page, list);
		spin_lock_irq(&hugetlb_lock);
		goto free_new;
	} else if (!folio_test_hugetlb_freed(old_folio)) {
		/*
		 * Page's refcount is 0 but it has not been enqueued in the
		 * Folio's refcount is 0 but it has not been enqueued in the
		 * freelist yet. Race window is small, so we can succeed here if
		 * we retry.
		 */
@@ -2801,7 +2795,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
		goto retry;
	} else {
		/*
		 * Ok, old_page is still a genuine free hugepage. Remove it from
		 * Ok, old_folio is still a genuine free hugepage. Remove it from
		 * the freelist and decrease the counters. These will be
		 * incremented again when calling __prep_account_new_huge_page()
		 * and enqueue_hugetlb_folio() for new_folio. The counters will
@@ -2810,14 +2804,14 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
		remove_hugetlb_folio(h, old_folio, false);

		/*
		 * Ref count on new page is already zero as it was dropped
		 * Ref count on new_folio is already zero as it was dropped
		 * earlier.  It can be directly added to the pool free list.
		 */
		__prep_account_new_huge_page(h, nid);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Pages have been replaced, we can safely free the old one.
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
@@ -2827,7 +2821,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,

free_new:
	spin_unlock_irq(&hugetlb_lock);
	/* Page has a zero ref count, but needs a ref to be freed */
	/* Folio has a zero ref count, but needs a ref to be freed */
	folio_ref_unfreeze(new_folio, 1);
	update_and_free_hugetlb_folio(h, new_folio, false);

@@ -2865,7 +2859,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
	if (folio_ref_count(folio) && !isolate_hugetlb(&folio->page, list))
		ret = 0;
	else if (!folio_ref_count(folio))
		ret = alloc_and_dissolve_huge_page(h, &folio->page, list);
		ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);

	return ret;
}
@@ -3083,14 +3077,14 @@ static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
			if (!alloc_bootmem_huge_page(h, nid))
				break;
		} else {
			struct page *page;
			struct folio *folio;
			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

			page = alloc_fresh_huge_page(h, gfp_mask, nid,
			folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
					&node_states[N_MEMORY], NULL);
			if (!page)
			if (!folio)
				break;
			free_huge_page(page); /* free it into the hugepage allocator */
			free_huge_page(&folio->page); /* free it into the hugepage allocator */
		}
		cond_resched();
	}
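
For reference, the two conversions used throughout this patch are
page_folio(), which maps a struct page to its containing folio, and
&folio->page, which recovers the head page for callers that still operate
on struct page. A minimal sketch of the pattern; the wrapper below is
hypothetical and only illustrates the conversion, it is not part of this
patch:

	/* Hypothetical helper, for illustration only. */
	static struct page *alloc_fresh_huge_page_wrapper(struct hstate *h,
			gfp_t gfp_mask, int nid, nodemask_t *nmask)
	{
		struct folio *folio;

		folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
		if (!folio)
			return NULL;

		/* Unconverted callers still expect the head page. */
		return &folio->page;
	}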