Commit 738fe30d authored by Matthew Wilcox (Oracle), committed by Liu Shixin
Browse files

mm: turn folio_test_hugetlb into a PageType

stable inclusion
from stable-v6.6.30
commit 2431b5f2650dfc47ce782d1ca7b02d6b3916976f
category: cleanup
bugzilla: https://gitee.com/openeuler/kernel/issues/I9NYY7
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=2431b5f2650dfc47ce782d1ca7b02d6b3916976f

--------------------------------

commit d99e3140a4d33e26066183ff727d8f02f56bec64 upstream.

The current folio_test_hugetlb() can be fooled by a concurrent folio split
into returning true for a folio which has never belonged to hugetlbfs.
This can't happen if the caller holds a refcount on it, but we have a few
places (memory-failure, compaction, procfs) which do not and should not
take a speculative reference.

Since hugetlb pages do not use individual page mapcounts (they are always
fully mapped and use the entire_mapcount field to record the number of
mappings), the PageType field is available now that page_mapcount()
ignores the value in this field.

In compaction and with CONFIG_DEBUG_VM enabled, the current implementation
can result in an oops, as reported by Luis. This happens since 9c5ccf2d
("mm: remove HUGETLB_PAGE_DTOR") effectively added some VM_BUG_ON() checks
in the PageHuge() testing path.

[willy@infradead.org: update vmcoreinfo]
  Link: https://lkml.kernel.org/r/ZgGZUvsdhaT1Va-T@casper.infradead.org
Link: https://lkml.kernel.org/r/20240321142448.1645400-6-willy@infradead.org


Fixes: 9c5ccf2d ("mm: remove HUGETLB_PAGE_DTOR")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Luis Chamberlain <mcgrof@kernel.org>
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218227


Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Conflicts:
	include/linux/page-flags.h
[ Conflict with commit aeb5fd3e since it adds a page_type PG_dpool with
  value 0x800, which is the same as PG_hugetlb. Change PG_dpool to 0x10000. ]
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
parent 75510e3e
Loading
Loading
Loading
Loading
+34 −38
Original line number Diff line number Diff line
@@ -193,7 +193,6 @@ enum pageflags {

	/* At least one page in this folio has the hwpoison flag set */
	PG_has_hwpoisoned = PG_error,
	PG_hugetlb = PG_active,
	PG_large_rmappable = PG_workingset, /* anon or file-backed */
};

@@ -848,29 +847,6 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)

/**
 * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
 * @folio: The folio to test.
 *
 * Context: Any context.  Caller should have a reference on the folio to
 * prevent it from being turned into a tail page.
 * Return: True for hugetlbfs folios, false for anon folios or folios
 * belonging to other filesystems.
 */
static inline bool folio_test_hugetlb(struct folio *folio)
{
	return folio_test_large(folio) &&
		test_bit(PG_hugetlb, folio_flags(folio, 1));
}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
@@ -926,18 +902,6 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
	TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
@@ -954,8 +918,9 @@ static inline bool is_page_hwpoison(struct page *page)
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400
#define PG_hugetlb	0x00000800
#ifdef CONFIG_DYNAMIC_POOL
#define PG_dpool	0x00000800
#define PG_dpool	0x00010000
#endif

#define PageType(page, flag)						\
@@ -1058,6 +1023,37 @@ PAGE_TYPE_OPS(Guard, guard, guard)
PAGE_TYPE_OPS(Dpool, dpool, dpool)
#endif

#ifdef CONFIG_HUGETLB_PAGE
FOLIO_TYPE_OPS(hugetlb, hugetlb)
#else
FOLIO_TEST_FLAG_FALSE(hugetlb)
#endif

/**
 * PageHuge - Determine if the page belongs to hugetlbfs
 * @page: The page to test.
 *
 * Context: Any context.
 * Return: True for hugetlbfs pages, false for anon pages or pages
 * belonging to other filesystems.
 */
static inline bool PageHuge(const struct page *page)
{
	return folio_test_hugetlb(page_folio(page));
}

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

extern bool is_free_buddy_page(struct page *page);

PAGEFLAG(Isolated, isolated, PF_ANY);
@@ -1124,7 +1120,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
 */
#define PAGE_FLAGS_SECOND						\
	(0xffUL /* order */		| 1UL << PG_has_hwpoisoned |	\
	 1UL << PG_hugetlb		| 1UL << PG_large_rmappable)
	 1UL << PG_large_rmappable)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
+1 −0
Original line number Diff line number Diff line
@@ -143,6 +143,7 @@ IF_HAVE_PG_ARCH_X(arch_3)
#define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }

#define __def_pagetype_names						\
	DEF_PAGETYPE_NAME(hugetlb),					\
	DEF_PAGETYPE_NAME(offline),					\
	DEF_PAGETYPE_NAME(guard),					\
	DEF_PAGETYPE_NAME(table),					\
+2 −3
Original line number Diff line number Diff line
@@ -673,11 +673,10 @@ static int __init crash_save_vmcoreinfo_init(void)
	VMCOREINFO_NUMBER(PG_head_mask);
#define PAGE_BUDDY_MAPCOUNT_VALUE	(~PG_buddy)
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#ifdef CONFIG_HUGETLB_PAGE
	VMCOREINFO_NUMBER(PG_hugetlb);
#define PAGE_HUGETLB_MAPCOUNT_VALUE	(~PG_hugetlb)
	VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
#define PAGE_OFFLINE_MAPCOUNT_VALUE	(~PG_offline)
	VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);
#endif

#ifdef CONFIG_KALLSYMS
	VMCOREINFO_SYMBOL(kallsyms_names);
+3 −19
Original line number Diff line number Diff line
@@ -1632,7 +1632,7 @@ static inline void __clear_hugetlb_destructor(struct hstate *h,
{
	lockdep_assert_held(&hugetlb_lock);

	folio_clear_hugetlb(folio);
	__folio_clear_hugetlb(folio);
}

/*
@@ -1719,7 +1719,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
		h->surplus_huge_pages_node[nid]++;
	}

	folio_set_hugetlb(folio);
	__folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
@@ -1981,7 +1981,7 @@ void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	hugetlb_vmemmap_optimize(h, &folio->page);
	INIT_LIST_HEAD(&folio->lru);
	folio_set_hugetlb(folio);
	__folio_set_hugetlb(folio);
	hugetlb_set_folio_subpool(folio, NULL);
	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
@@ -2084,22 +2084,6 @@ bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
	return __prep_compound_gigantic_folio(folio, order, true);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	struct folio *folio;

	if (!PageCompound(page))
		return 0;
	folio = page_folio(page);
	return folio_test_hugetlb(folio);
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * Find and lock address space (mapping) in write mode.
 *