Commit 6c037149 authored by Mike Kravetz, committed by Linus Torvalds

hugetlb: convert PageHugeFreed to HPageFreed flag

Use the new hugetlb-specific HPageFreed flag to replace the PageHugeFreed
interfaces.
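
For callers the conversion is mechanical. A minimal before/after sketch at a
call site such as enqueue_huge_page() (the full hunks are in the diff below;
the new helper is assumed, per the rest of this series, to store the flag as a
bit in the head page's private field):

	/* before: writes a -1UL sentinel into page_private(head + 4) */
	SetPageHugeFreed(page);

	/* after: sets the HPG_freed bit on the head page (assumed storage) */
	SetHPageFreed(page);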

Link: https://lkml.kernel.org/r/20210122195231.324857-6-mike.kravetz@oracle.com


Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9157c311
include/linux/hugetlb.h  +3 −0
@@ -487,11 +487,13 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  *	allocator.  Typically used for migration target pages when no pages
  *	are available in the pool.  The hugetlb free page path will
  *	immediately free pages with this flag set to the buddy allocator.
+ * HPG_freed - Set when page is on the free lists.
  */
 enum hugetlb_page_flags {
 	HPG_restore_reserve = 0,
 	HPG_migratable,
 	HPG_temporary,
+	HPG_freed,
 	__NR_HPAGEFLAGS,
 };

@@ -536,6 +538,7 @@ static inline void ClearHPage##uname(struct page *page) \
 HPAGEFLAG(RestoreReserve, restore_reserve)
 HPAGEFLAG(Migratable, migratable)
 HPAGEFLAG(Temporary, temporary)
+HPAGEFLAG(Freed, freed)
 
 #ifdef CONFIG_HUGETLB_PAGE
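
HPAGEFLAG(Freed, freed) reuses the wrapper-generating macros whose tail is
visible in the @@ -536 hunk context above (ClearHPage##uname and friends). A
rough sketch of the three helpers it is expected to expand to, assuming the
flag bits live in the head page's private field (the macro bodies themselves
are not part of this diff):

	static inline int HPageFreed(struct page *page)
	{ return test_bit(HPG_freed, &(page->private)); }

	static inline void SetHPageFreed(struct page *page)
	{ set_bit(HPG_freed, &(page->private)); }

	static inline void ClearHPageFreed(struct page *page)
	{ clear_bit(HPG_freed, &(page->private)); }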

mm/hugetlb.c  +4 −19
@@ -79,21 +79,6 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
-static inline bool PageHugeFreed(struct page *head)
-{
-	return page_private(head + 4) == -1UL;
-}
-
-static inline void SetPageHugeFreed(struct page *head)
-{
-	set_page_private(head + 4, -1UL);
-}
-
-static inline void ClearPageHugeFreed(struct page *head)
-{
-	set_page_private(head + 4, 0);
-}
-
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);

@@ -1053,7 +1038,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
 	list_move(&page->lru, &h->hugepage_freelists[nid]);
 	h->free_huge_pages++;
 	h->free_huge_pages_node[nid]++;
-	SetPageHugeFreed(page);
+	SetHPageFreed(page);
 }

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1070,7 +1055,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)

 		list_move(&page->lru, &h->hugepage_activelist);
 		set_page_refcounted(page);
-		ClearPageHugeFreed(page);
+		ClearHPageFreed(page);
 		h->free_huge_pages--;
 		h->free_huge_pages_node[nid]--;
 		return page;
@@ -1485,7 +1470,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
 	spin_lock(&hugetlb_lock);
 	h->nr_huge_pages++;
 	h->nr_huge_pages_node[nid]++;
-	ClearPageHugeFreed(page);
+	ClearHPageFreed(page);
 	spin_unlock(&hugetlb_lock);
 }

@@ -1756,7 +1741,7 @@ int dissolve_free_huge_page(struct page *page)
 		 * We should make sure that the page is already on the free list
 		 * when it is dissolved.
 		 */
-		if (unlikely(!PageHugeFreed(head))) {
+		if (unlikely(!HPageFreed(head))) {
 			spin_unlock(&hugetlb_lock);
 			cond_resched();
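
Only the converted check is shown above. A sketch of how such a
check-and-retry typically reads in dissolve_free_huge_page() (an assumption
about the surrounding code, which is not part of this diff):

	retry:
		spin_lock(&hugetlb_lock);
		...
		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!HPageFreed(head))) {
			/* Race with enqueue: drop the lock, give the freeing
			 * path a chance to put the page on the free list,
			 * then try again rather than failing outright.
			 */
			spin_unlock(&hugetlb_lock);
			cond_resched();
			goto retry;
		}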