Commit fd56eef2 authored by Mel Gorman, committed by Linus Torvalds

mm/page_alloc: simplify how many pages are selected per pcp list during bulk free

free_pcppages_bulk() selects pages to free by round-robining between
lists.  Originally this was meant to shrink the lists evenly by
migratetype, but uneven freeing is inevitable now that the pcp lists
hold high-order pages: freeing a single page removes 1 << order base
pages from the count at once.  Simplify list selection: in
free_unref_page_commit(), start with a list that definitely has pages
on it (the list the page was just added to); for a drain, it does not
matter where freeing starts because every page is removed.
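
As a rough illustration outside the kernel, here is a minimal
standalone C sketch of the simplified selection loop.  NR_LISTS,
struct pcp_list and free_bulk() are toy stand-ins invented for this
sketch, not the kernel's types; it models only the narrowing
round-robin and the caller-supplied starting index, assuming the
caller passes the index of a list known to hold pages (or 0 for a
full drain).  Note that, as in the patched code, pindex is advanced
before it is checked, so the caller's list guarantees termination
rather than necessarily being scanned first.

#include <stdio.h>

#define NR_LISTS 4			/* stand-in for NR_PCP_LISTS */

struct pcp_list {
	int count;			/* pages held on this list */
};

static void free_bulk(struct pcp_list *lists, int count, int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_LISTS - 1;
	int total = 0;

	/* Mirrors "count = min(pcp->count, count)": never ask for more
	 * pages than exist, or the scan below would spin forever. */
	for (int i = 0; i < NR_LISTS; i++)
		total += lists[i].count;
	if (count > total)
		count = total;

	while (count > 0) {
		struct pcp_list *list;

		/* Round-robin to the next non-empty list, shrinking the
		 * [min_pindex, max_pindex] range as the lists at either
		 * end are found empty.  No batch_free bookkeeping. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &lists[pindex];
			if (list->count)
				break;
			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		/* Free from this list until it empties or count is met. */
		while (count > 0 && list->count > 0) {
			list->count--;
			count--;
		}
	}
}

int main(void)
{
	struct pcp_list lists[NR_LISTS] = { {3}, {0}, {5}, {2} };

	free_bulk(lists, 6, 2);		/* list 2 is known to be non-empty */
	for (int i = 0; i < NR_LISTS; i++)
		printf("list %d: %d pages\n", i, lists[i].count);
	return 0;
}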

Link: https://lkml.kernel.org/r/20220217002227.5739-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Aaron Lu <aaron.lu@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 35b6d770
mm/page_alloc.c (+11 −23)
@@ -1444,13 +1444,11 @@ static inline void prefetch_buddy(struct page *page, unsigned int order)
  * count is the number of pages to free.
  */
 static void free_pcppages_bulk(struct zone *zone, int count,
-					struct per_cpu_pages *pcp)
+					struct per_cpu_pages *pcp,
+					int pindex)
 {
-	int pindex = 0;
 	int min_pindex = 0;
 	int max_pindex = NR_PCP_LISTS - 1;
-	int batch_free = 0;
-	int nr_freed = 0;
 	unsigned int order;
 	int prefetch_nr = READ_ONCE(pcp->batch);
 	bool isolated_pageblocks;
@@ -1464,16 +1462,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	count = min(pcp->count, count);
 	while (count > 0) {
 		struct list_head *list;
+		int nr_pages;
 
-		/*
-		 * Remove pages from lists in a round-robin fashion. A
-		 * batch_free count is maintained that is incremented when an
-		 * empty list is encountered.  This is so more pages are freed
-		 * off fuller lists instead of spinning excessively around empty
-		 * lists
-		 */
+		/* Remove pages from lists in a round-robin fashion. */
 		do {
-			batch_free++;
 			if (++pindex > max_pindex)
 				pindex = min_pindex;
 			list = &pcp->lists[pindex];
@@ -1486,18 +1478,15 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 				min_pindex++;
 		} while (1);
 
-		/* This is the only non-empty list. Free them all. */
-		if (batch_free >= max_pindex - min_pindex)
-			batch_free = count;
-
 		order = pindex_to_order(pindex);
+		nr_pages = 1 << order;
 		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
 		do {
 			page = list_last_entry(list, struct page, lru);
 			/* must delete to avoid corrupting pcp list */
 			list_del(&page->lru);
-			nr_freed += 1 << order;
-			count -= 1 << order;
+			count -= nr_pages;
+			pcp->count -= nr_pages;
 
 			if (bulkfree_pcp_prepare(page))
 				continue;
@@ -1521,9 +1510,8 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 				prefetch_buddy(page, order);
 				prefetch_nr--;
 			}
-		} while (count > 0 && --batch_free && !list_empty(list));
+		} while (count > 0 && !list_empty(list));
 	}
-	pcp->count -= nr_freed;
 
 	/*
 	 * local_lock_irq held so equivalent to spin_lock_irqsave for
@@ -3077,7 +3065,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 	batch = READ_ONCE(pcp->batch);
 	to_drain = min(pcp->count, batch);
 	if (to_drain > 0)
-		free_pcppages_bulk(zone, to_drain, pcp);
+		free_pcppages_bulk(zone, to_drain, pcp, 0);
 	local_unlock_irqrestore(&pagesets.lock, flags);
 }
 #endif
@@ -3098,7 +3086,7 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone)
 
 	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
 	if (pcp->count)
-		free_pcppages_bulk(zone, pcp->count, pcp);
+		free_pcppages_bulk(zone, pcp->count, pcp, 0);
 
 	local_unlock_irqrestore(&pagesets.lock, flags);
 }
@@ -3379,7 +3367,7 @@ static void free_unref_page_commit(struct page *page, int migratetype,
 	if (pcp->count >= high) {
 		int batch = READ_ONCE(pcp->batch);
 
-		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp);
+		free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp, pindex);
 	}
 }
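
The resulting caller contract, summarized from the hunks above
(signatures abbreviated for illustration): the drain paths free every
page, so the starting list is irrelevant and they pass 0, while
free_unref_page_commit() has just added a page to lists[pindex], so
passing that pindex hands free_pcppages_bulk() a list that is
guaranteed to be non-empty:

	/* drain paths: everything goes, any starting index works */
	free_pcppages_bulk(zone, to_drain, pcp, 0);

	/* hot path: lists[pindex] was appended to just above */
	free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp, pindex);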