Commit 77fe7f13 authored by Mel Gorman, committed by Linus Torvalds

mm/page_alloc: check high-order pages for corruption during PCP operations

Eric Dumazet pointed out that commit 44042b44 ("mm/page_alloc: allow
high-order pages to be stored on the per-cpu lists") only checks the
head page during PCP refill and allocation operations.  This was an
oversight and all pages should be checked.  This will incur a small
performance penalty but it's necessary for correctness.
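
As a rough standalone illustration (not kernel code: struct fake_page,
check_head_only() and check_all_pages() are made-up stand-ins for struct page
and the check_new_page()/check_new_pages() helpers in the diff below), the
difference between the old head-only check and the new per-page check is:

#include <stdbool.h>
#include <stdio.h>

struct fake_page { bool bad; };		/* stand-in for a page failing sanity checks */

/* Old behaviour: only the head page of the high-order block is inspected. */
static bool check_head_only(struct fake_page *pages, unsigned int order)
{
	(void)order;
	return pages[0].bad;
}

/* New behaviour, mirroring check_new_pages(): walk all (1 << order) pages. */
static bool check_all_pages(struct fake_page *pages, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1u << order); i++) {
		if (pages[i].bad)
			return true;
	}
	return false;
}

int main(void)
{
	/* order-2 block (4 pages) with a corrupted tail page at index 2 */
	struct fake_page block[4] = { {false}, {false}, {true}, {false} };

	printf("head-only: %d\n", check_head_only(block, 2));	/* 0 - misses it */
	printf("all pages: %d\n", check_all_pages(block, 2));	/* 1 - catches it */
	return 0;
}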

Link: https://lkml.kernel.org/r/20220310092456.GJ15701@techsingularity.net
Fixes: 44042b44 ("mm/page_alloc: allow high-order pages to be stored on the per-cpu lists")
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reported-by: Eric Dumazet <edumazet@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Wei Xu <weixugc@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3313204c
mm/page_alloc.c: +23 −23
@@ -2291,23 +2291,36 @@ static inline int check_new_page(struct page *page)
 	return 1;
 }
 
+static bool check_new_pages(struct page *page, unsigned int order)
+{
+	int i;
+	for (i = 0; i < (1 << order); i++) {
+		struct page *p = page + i;
+
+		if (unlikely(check_new_page(p)))
+			return true;
+	}
+
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_VM
 /*
  * With DEBUG_VM enabled, order-0 pages are checked for expected state when
  * being allocated from pcp lists. With debug_pagealloc also enabled, they are
  * also checked when pcp lists are refilled from the free lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
 #else
 /*
@@ -2315,32 +2328,19 @@ static inline bool check_new_pcp(struct page *page)
  * when pcp lists are being refilled from the free lists. With debug_pagealloc
  * enabled, they are also checked when being allocated from the pcp lists.
  */
-static inline bool check_pcp_refill(struct page *page)
+static inline bool check_pcp_refill(struct page *page, unsigned int order)
 {
-	return check_new_page(page);
+	return check_new_pages(page, order);
 }
-static inline bool check_new_pcp(struct page *page)
+static inline bool check_new_pcp(struct page *page, unsigned int order)
 {
 	if (debug_pagealloc_enabled_static())
-		return check_new_page(page);
+		return check_new_pages(page, order);
 	else
 		return false;
 }
 #endif /* CONFIG_DEBUG_VM */
 
-static bool check_new_pages(struct page *page, unsigned int order)
-{
-	int i;
-	for (i = 0; i < (1 << order); i++) {
-		struct page *p = page + i;
-
-		if (unlikely(check_new_page(p)))
-			return true;
-	}
-
-	return false;
-}
-
 inline void post_alloc_hook(struct page *page, unsigned int order,
 				gfp_t gfp_flags)
 {
@@ -2982,7 +2982,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (unlikely(page == NULL))
 			break;
 
-		if (unlikely(check_pcp_refill(page)))
+		if (unlikely(check_pcp_refill(page, order)))
 			continue;
 
 		/*
@@ -3600,7 +3600,7 @@ struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
 		page = list_first_entry(list, struct page, lru);
 		list_del(&page->lru);
 		pcp->count -= 1 << order;
-	} while (check_new_pcp(page));
+	} while (check_new_pcp(page, order));
 
 	return page;
 }