Commit ff7d853b authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb: increase use of folios in alloc_huge_page()

Change hugetlb_cgroup_commit_charge{,_rsvd}(), dequeue_huge_page_vma() and
alloc_buddy_huge_page_with_mpol() to use folios, so that alloc_huge_page()
is cleaned up to operate on folios until its return.
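
For illustration, the pattern reduced to a standalone sketch (toy types and
hypothetical names, not the kernel's): internal helpers pass struct folio
through, and only the public entry point converts back to struct page at
its return boundary.

#include <stdio.h>

/* Toy stand-ins for the kernel's types. */
struct page { int id; };
struct folio { struct page page; };

/* Internal helper now returns a folio, like dequeue_hugetlb_folio_vma(). */
static struct folio *toy_dequeue_folio(struct folio *pool)
{
	return pool;		/* may be NULL when the pool is empty */
}

/* Public API keeps its page-based signature, like alloc_huge_page():
 * the folio-to-page conversion happens once, at the return. */
static struct page *toy_alloc_page(struct folio *pool)
{
	struct folio *folio = toy_dequeue_folio(pool);

	if (!folio)
		return NULL;
	return &folio->page;
}

int main(void)
{
	struct folio f = { .page = { .id = 1 } };
	struct page *p = toy_alloc_page(&f);

	printf("got page %d\n", p ? p->id : -1);
	return 0;
}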

Link: https://lkml.kernel.org/r/20230113223057.173292-6-sidhartha.kumar@oracle.com


Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 3a740e8b
include/linux/hugetlb_cgroup.h +4 −4
@@ -141,10 +141,10 @@ extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
					     struct hugetlb_cgroup **ptr);
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
-					 struct page *page);
+					 struct folio *folio);
extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
-					      struct page *page);
+					      struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
					 struct folio *folio);
extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
@@ -230,14 +230,14 @@ static inline int hugetlb_cgroup_charge_cgroup_rsvd(int idx,

static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
-						struct page *page)
+						struct folio *folio)
{
}

static inline void
hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
{
}
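
The header keeps the real declarations and the no-op stubs in lockstep, so
callers compile the same way with or without CONFIG_CGROUP_HUGETLB. A minimal
sketch of that pattern with the new folio-taking signature:

struct folio;
struct hugetlb_cgroup;

#ifdef CONFIG_CGROUP_HUGETLB
extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
					 struct hugetlb_cgroup *h_cg,
					 struct folio *folio);
#else
static inline void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
						struct hugetlb_cgroup *h_cg,
						struct folio *folio)
{
}
#endif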

mm/hugetlb.c +16 −17
@@ -1348,7 +1348,7 @@ static unsigned long available_huge_pages(struct hstate *h)
	return h->free_huge_pages - h->resv_huge_pages;
}

-static struct page *dequeue_huge_page_vma(struct hstate *h,
+static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
@@ -1392,7 +1392,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
	}

	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;

err:
	return NULL;
@@ -2446,7 +2446,7 @@ static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
-struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
+struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = NULL;
@@ -2469,7 +2469,7 @@ struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
	if (!folio)
		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);
-	return &folio->page;
+	return folio;
}

/* page migration callback function */
@@ -3018,7 +3018,6 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
-	struct page *page;
	struct folio *folio;
	long map_chg, map_commit;
	long gbl_chg;
@@ -3082,34 +3081,34 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
-	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
-	if (!page) {
+	folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
+	if (!folio) {
		spin_unlock_irq(&hugetlb_lock);
-		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
-		if (!page)
+		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
+		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
-			SetHPageRestoreReserve(page);
+			folio_set_hugetlb_restore_reserve(folio);
			h->resv_huge_pages--;
		}
-		list_add(&page->lru, &h->hugepage_activelist);
-		set_page_refcounted(page);
+		list_add(&folio->lru, &h->hugepage_activelist);
+		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}
-	folio = page_folio(page);
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
+
+	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (deferred_reserve) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
-						  h_cg, page);
+						  h_cg, folio);
	}

	spin_unlock_irq(&hugetlb_lock);

-	hugetlb_set_page_subpool(page, spool);
+	hugetlb_set_folio_subpool(folio, spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
@@ -3130,7 +3129,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					pages_per_huge_page(h), folio);
	}
-	return page;
+	return &folio->page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
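
One substitution above is more than a rename: set_page_refcounted(page)
became folio_ref_unfreeze(folio, 1). Free hugetlb folios sit with a frozen
(zero) refcount; handing one out raises it to 1. A toy model of that
handoff, with userspace atomics standing in for the kernel's page_ref
machinery:

#include <stdatomic.h>
#include <stdio.h>

/* Toy folio with just a refcount. In the kernel, folio_ref_unfreeze()
 * asserts the count is 0 and then does a release store of the new value. */
struct toy_folio { atomic_int refcount; };

static void toy_ref_unfreeze(struct toy_folio *folio, int count)
{
	atomic_store_explicit(&folio->refcount, count, memory_order_release);
}

int main(void)
{
	struct toy_folio folio = { .refcount = 0 };	/* frozen on the free list */

	toy_ref_unfreeze(&folio, 1);	/* now owned by the caller */
	printf("refcount=%d\n", atomic_load(&folio.refcount));
	return 0;
}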
mm/hugetlb_cgroup.c +2 −6
@@ -331,19 +331,15 @@ static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,

void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
				  struct hugetlb_cgroup *h_cg,
-				  struct page *page)
+				  struct folio *folio)
{
-	struct folio *folio = page_folio(page);
-
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
}

void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
				       struct hugetlb_cgroup *h_cg,
-				       struct page *page)
+				       struct folio *folio)
{
-	struct folio *folio = page_folio(page);
-
	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
}
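
Net effect in mm/hugetlb_cgroup.c: with the folio handed in directly, the
page_folio() locals disappear and both wrappers become one-line shims over
__hugetlb_cgroup_commit_charge(), differing only in the rsvd flag. A toy
reduction of that shape (hypothetical names, arguments trimmed for brevity):

#include <stdbool.h>

struct folio;
struct hugetlb_cgroup;

/* Toy shim target: the folio is forwarded untouched; only the
 * reservation flag differs between the two callers below. */
static void toy_commit_charge(struct hugetlb_cgroup *h_cg,
			      struct folio *folio, bool rsvd)
{
	(void)h_cg; (void)folio; (void)rsvd;	/* charging work elided */
}

static void commit_charge(struct hugetlb_cgroup *h_cg, struct folio *folio)
{
	toy_commit_charge(h_cg, folio, false);
}

static void commit_charge_rsvd(struct hugetlb_cgroup *h_cg, struct folio *folio)
{
	toy_commit_charge(h_cg, folio, true);
}

int main(void)
{
	commit_charge(0, 0);
	commit_charge_rsvd(0, 0);
	return 0;
}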