Commit d4ab0316 authored by Sidhartha Kumar, committed by Andrew Morton

mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios

Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Link: https://lkml.kernel.org/r/20221101223059.460937-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0356c4b9
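The conversion follows the usual folio pattern: the caller resolves the folio once with page_folio() and passes it down, instead of handing a struct page to a helper that would recompute the head page itself. A minimal sketch of the call-site shape (the example_uncharge() wrapper is hypothetical and not part of this commit; the hugetlb helpers are the ones declared in the diff below):

	/* Hypothetical caller illustrating the before/after pattern. */
	static void example_uncharge(struct hstate *h, struct page *page)
	{
		/* page_folio() just locates the head page; no reference is taken. */
		struct folio *folio = page_folio(page);

		/* Before this patch:
		 *	hugetlb_cgroup_uncharge_page(hstate_index(h),
		 *				     pages_per_huge_page(h), page);
		 */
		hugetlb_cgroup_uncharge_folio(hstate_index(h),
					      pages_per_huge_page(h), folio);
	}

free_huge_page() already has the folio in hand, so its call sites below pass it directly.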
include/linux/hugetlb_cgroup.h  +8 −8

@@ -158,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 					      struct hugetlb_cgroup *h_cg,
 					      struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					      struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					 struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					      struct folio *folio);
 
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
 					   struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						struct folio *folio)
 {
 }
 
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
 						     unsigned long nr_pages,
-						     struct page *page)
+						     struct folio *folio)
 {
 }
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,
mm/hugetlb.c  +9 −6

@@ -1742,10 +1742,10 @@ void free_huge_page(struct page *page)
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				     pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					  pages_per_huge_page(h), folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
 
@@ -2872,6 +2872,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
+	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
 	int ret, idx;
@@ -2935,6 +2936,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * a reservation exists for the allocation.
 	 */
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
 	if (!page) {
 		spin_unlock_irq(&hugetlb_lock);
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -2949,6 +2951,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		set_page_refcounted(page);
 		/* Fall through */
 	}
+	folio = page_folio(page);
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
@@ -2978,8 +2981,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
 		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
 	}
 	return page;
 
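Note the ordering in alloc_huge_page(): folio is assigned via page_folio(page) only after every allocation path has produced a page, so the deferred-reserve error path at the end of the function can reuse the cached folio. Condensed sketch of that shape, with everything but the relevant lines elided (identifiers exactly as in the hunks above):

	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		/* fall back to the buddy allocator */
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
	}
	folio = page_folio(page);	/* safe: page is known non-NULL here */
	...
	if (deferred_reserve)		/* error path reuses the cached folio */
		hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
				pages_per_huge_page(h), folio);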
mm/hugetlb_cgroup.c  +10 −11

@@ -346,11 +346,10 @@ void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					   struct folio *folio, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -368,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 		css_put(&h_cg->css);
 	else {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
 			   usage - nr_pages);
 	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				  struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+				       struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
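The node lookup in the uncharge helper changes from page_to_nid(page) to folio_nid(folio). folio_nid() is a thin wrapper that calls page_to_nid() on the folio's head page, and all pages of a folio reside on one NUMA node, so the accounting target is unchanged; what goes away is the page_folio() head-page lookup inside this helper, since callers now pass the folio they already have. A sketch of the equivalence (nid_matches() is a hypothetical check, assuming the page belongs to the folio):

	/* Hypothetical check: both lookups name the same NUMA node. */
	static bool nid_matches(struct page *page)
	{
		struct folio *folio = page_folio(page);

		/* folio_nid(folio) is page_to_nid(&folio->page) by definition. */
		return folio_nid(folio) == page_to_nid(page);	/* always true */
	}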