Commit a36f1e90 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert dequeue_hugetlb_page functions to folios

dequeue_huge_page_node_exact() is changed to
dequeue_hugetlb_folio_node_exact() and dequeue_huge_page_nodemask() is
changed to dequeue_hugetlb_folio_nodemask().  Update their callers to
pass in a folio.
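
The conversion is mechanical because of how struct folio is laid out: a
struct page sits at offset zero of the folio, so a head page and its folio
share an address. That is what makes the transitional "&folio->page"
returns in this patch legal, and what the kernel's page_folio() exploits in
the other direction. A minimal userspace sketch of the idea follows; the
toy types and toy_page_folio() are stand-ins of mine, not kernel code, and
the real page_folio() additionally resolves tail pages to their compound
head.

/*
 * Userspace toy model of the page/folio relationship (NOT kernel code).
 * In the kernel, struct folio is defined so that a struct page sits at
 * offset 0; a head page and its folio are therefore the same address.
 */
#include <stdio.h>

struct page {
	unsigned long flags;	/* stand-in for the real flags word */
};

struct folio {
	struct page page;	/* must stay the first member */
};

/* Mirrors the spirit of the kernel's page_folio() for a head page. */
static struct folio *toy_page_folio(struct page *page)
{
	return (struct folio *)page;
}

int main(void)
{
	struct folio f = { .page = { .flags = 0 } };

	/* A caller that still wants a page can peel one off the folio... */
	struct page *p = &f.page;

	/* ...and any head page can be turned back into its folio. */
	struct folio *back = toy_page_folio(p);

	printf("round trip ok: %d\n", back == &f);
	return 0;
}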

Link: https://lkml.kernel.org/r/20230113223057.173292-4-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6f6956cf
mm/hugetlb.c  +30 −26
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1282,32 +1282,33 @@ static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 	folio_set_hugetlb_freed(folio);
 }
 
-static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
+static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
+								int nid)
 {
-	struct page *page;
+	struct folio *folio;
 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
 
 	lockdep_assert_held(&hugetlb_lock);
-	list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
-		if (pin && !is_longterm_pinnable_page(page))
+	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
+		if (pin && !folio_is_longterm_pinnable(folio))
 			continue;
 
-		if (PageHWPoison(page))
+		if (folio_test_hwpoison(folio))
 			continue;
 
-		list_move(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
-		ClearHPageFreed(page);
+		list_move(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
+		folio_clear_hugetlb_freed(folio);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
-		return page;
+		return folio;
 	}
 
 	return NULL;
 }
 
-static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
-		nodemask_t *nmask)
+static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
+							int nid, nodemask_t *nmask)
 {
 	unsigned int cpuset_mems_cookie;
 	struct zonelist *zonelist;
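
Note the swap of set_page_refcounted() for folio_ref_unfreeze(folio, 1) in
the hunk above: free hugetlb pages sit on the free list with a frozen
(zero) refcount, and dequeueing hands the folio to a single owner by
publishing a count of one. A toy C11 model of that pattern, my
illustration rather than kernel code:

/*
 * Toy model of the unfreeze pattern (plain C11, NOT kernel code).
 * Folios parked on the free list hold a frozen refcount of zero;
 * dequeueing releases them by publishing a count of 1.
 */
#include <assert.h>
#include <stdatomic.h>

struct toy_folio {
	atomic_int refcount;	/* 0 while frozen on the free list */
};

/* Mirrors the spirit of folio_ref_unfreeze(folio, 1). */
static void toy_ref_unfreeze(struct toy_folio *folio, int count)
{
	/* Unfreezing a folio that is not frozen would be a bug. */
	assert(atomic_load_explicit(&folio->refcount,
				    memory_order_relaxed) == 0);
	/* Release ordering publishes earlier initialisation to readers. */
	atomic_store_explicit(&folio->refcount, count, memory_order_release);
}

int main(void)
{
	struct toy_folio folio = { .refcount = 0 };

	toy_ref_unfreeze(&folio, 1);	/* folio now has one owner */
	assert(atomic_load(&folio.refcount) == 1);
	return 0;
}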
@@ -1320,7 +1321,7 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
-		struct page *page;
+		struct folio *folio;
 
 		if (!cpuset_zone_allowed(zone, gfp_mask))
 			continue;
@@ -1332,9 +1333,9 @@ static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask,
 			continue;
 		node = zone_to_nid(zone);
 
-		page = dequeue_huge_page_node_exact(h, node);
-		if (page)
-			return page;
+		folio = dequeue_hugetlb_folio_node_exact(h, node);
+		if (folio)
+			return folio;
 	}
 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
 		goto retry_cpuset;
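
The retry_cpuset loop that brackets this walk is a sequence-cookie
pattern: read_mems_allowed_begin() snapshots a sequence count before the
lockless zonelist walk, and read_mems_allowed_retry() reruns the walk if
the task's allowed-memory mask changed underneath it. A toy model of the
pattern, my simplification of the kernel's seqcount machinery:

/*
 * Toy model (plain C11, NOT kernel code) of the cookie/retry pattern
 * behind read_mems_allowed_begin()/read_mems_allowed_retry().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint mems_seq;	/* bumped whenever the mask changes */

static unsigned int toy_begin(void)
{
	return atomic_load_explicit(&mems_seq, memory_order_acquire);
}

static int toy_retry(unsigned int cookie)
{
	/* A writer bumped the sequence: the walk raced with an update. */
	return atomic_load_explicit(&mems_seq,
				    memory_order_acquire) != cookie;
}

int main(void)
{
	unsigned int cookie;

	do {
		cookie = toy_begin();
		/* ... walk the zonelist and try to dequeue a folio ... */
	} while (toy_retry(cookie));

	puts("zonelist walk completed against a stable mask");
	return 0;
}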
@@ -1352,7 +1353,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 				unsigned long address, int avoid_reserve,
 				long chg)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	struct mempolicy *mpol;
 	gfp_t gfp_mask;
 	nodemask_t *nodemask;
@@ -1374,22 +1375,24 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 
 	if (mpol_is_preferred_many(mpol)) {
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
 		/* Fallback to all nodes if page==NULL */
 		nodemask = NULL;
 	}
 
-	if (!page)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
+	if (!folio)
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+							nid, nodemask);
 
-	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
-		SetHPageRestoreReserve(page);
+	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
+		folio_set_hugetlb_restore_reserve(folio);
 		h->resv_huge_pages--;
 	}
 
 	mpol_cond_put(mpol);
-	return page;
+	return &folio->page;
 
 err:
 	return NULL;
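
One subtlety in the hunk above: when both dequeue attempts fail,
dequeue_huge_page_vma() reaches "return &folio->page" with folio == NULL,
and callers still get the NULL they expect only because page sits at
offset zero of struct folio. A small sketch that spells the conversion
out; the helper name and the toy types are my illustration, not kernel
API:

/*
 * Sketch (my illustration, NOT kernel API). "&folio->page" on a NULL
 * folio only works out because page sits at offset zero of struct
 * folio; a helper with an explicit NULL check makes that visible.
 */
#include <stddef.h>

struct page { unsigned long flags; };
struct folio { struct page page; };	/* page must stay at offset 0 */

static struct page *folio_page_or_null(struct folio *folio)
{
	return folio ? &folio->page : NULL;
}

int main(void)
{
	return folio_page_or_null(NULL) != NULL;	/* exits 0 */
}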
@@ -2475,12 +2478,13 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 {
 	spin_lock_irq(&hugetlb_lock);
 	if (available_huge_pages(h)) {
-		struct page *page;
+		struct folio *folio;
 
-		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
-		if (page) {
+		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
+						preferred_nid, nmask);
+		if (folio) {
 			spin_unlock_irq(&hugetlb_lock);
-			return page;
+			return &folio->page;
 		}
 	}
 	spin_unlock_irq(&hugetlb_lock);
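
alloc_huge_page_nodemask() keeps its page-returning signature here, so the
freshly dequeued folio is converted back at the boundary. A hypothetical
folio-aware wrapper, whose name and exact parameter order are my guesses
and not part of this patch, would undo that conversion immediately with
the kernel's page_folio():

/*
 * Hypothetical wrapper, NOT from this patch: until the allocator itself
 * returns a folio, a folio-aware caller converts back right away.
 */
static struct folio *alloc_hugetlb_folio_nodemask_sketch(struct hstate *h,
		int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask)
{
	struct page *page = alloc_huge_page_nodemask(h, preferred_nid,
						     nmask, gfp_mask);

	return page ? page_folio(page) : NULL;
}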