Commit 240d67a8 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: convert enqueue_huge_page() to folios

Convert callers of enqueue_huge_page() to pass in a folio; the function is
renamed to enqueue_hugetlb_folio().
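
A caller-side sketch (illustrative, not part of the patch): a caller that
only has a struct page resolves the enclosing folio with page_folio()
before enqueueing, under hugetlb_lock (which the helper asserts via
lockdep), as gather_surplus_pages() now does:

	spin_lock_irq(&hugetlb_lock);
	/* page_folio() resolves the page's head-page folio */
	enqueue_hugetlb_folio(h, page_folio(page));
	spin_unlock_irq(&hugetlb_lock);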

Link: https://lkml.kernel.org/r/20221129225039.82257-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Tarun Sahu <tsahu@linux.ibm.com>
Cc: Wei Chen <harperchen1110@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2f6c57d6
mm/hugetlb.c: +11 −11

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1127,17 +1127,17 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
 	return false;
 }
 
-static void enqueue_huge_page(struct hstate *h, struct page *page)
+static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
 {
-	int nid = page_to_nid(page);
+	int nid = folio_nid(folio);
 
 	lockdep_assert_held(&hugetlb_lock);
-	VM_BUG_ON_PAGE(page_count(page), page);
+	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
 
-	list_move(&page->lru, &h->hugepage_freelists[nid]);
+	list_move(&folio->lru, &h->hugepage_freelists[nid]);
 	h->free_huge_pages++;
 	h->free_huge_pages_node[nid]++;
-	SetHPageFreed(page);
+	folio_set_hugetlb_freed(folio);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1549,7 +1549,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
 		return;
 
 	arch_clear_hugepage_flags(&folio->page);
-	enqueue_huge_page(h, &folio->page);
+	enqueue_hugetlb_folio(h, folio);
 }
 
 static void __update_and_free_page(struct hstate *h, struct page *page)
@@ -1761,7 +1761,7 @@ void free_huge_page(struct page *page)
 		update_and_free_hugetlb_folio(h, folio, true);
 	} else {
 		arch_clear_hugepage_flags(page);
-		enqueue_huge_page(h, page);
+		enqueue_hugetlb_folio(h, folio);
 		spin_unlock_irqrestore(&hugetlb_lock, flags);
 	}
 }
@@ -2438,7 +2438,7 @@ static int gather_surplus_pages(struct hstate *h, long delta)
 		if ((--needed) < 0)
 			break;
 		/* Add the page to the hugetlb allocator */
-		enqueue_huge_page(h, page);
+		enqueue_hugetlb_folio(h, page_folio(page));
 	}
 free:
 	spin_unlock_irq(&hugetlb_lock);
@@ -2804,8 +2804,8 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * Ok, old_page is still a genuine free hugepage. Remove it from
 		 * the freelist and decrease the counters. These will be
 		 * incremented again when calling __prep_account_new_huge_page()
-		 * and enqueue_huge_page() for new_page. The counters will remain
-		 * stable since this happens under the lock.
+		 * and enqueue_hugetlb_folio() for new_folio. The counters will
+		 * remain stable since this happens under the lock.
 		 */
 		remove_hugetlb_folio(h, old_folio, false);
 
@@ -2814,7 +2814,7 @@ static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page,
 		 * earlier.  It can be directly added to the pool free list.
 		 */
 		__prep_account_new_huge_page(h, nid);
-		enqueue_huge_page(h, new_page);
+		enqueue_hugetlb_folio(h, new_folio);
 
 		/*
 		 * Pages have been replaced, we can safely free the old one.