Commit eb2e2b42 authored by Mel Gorman, committed by Andrew Morton

mm/page_alloc: explicitly record high-order atomic allocations in alloc_flags

A high-order ALLOC_HARDER allocation is assumed to be atomic.  While that
is accurate, it changes later in the series.  In preparation, explicitly
record high-order atomic allocations in gfp_to_alloc_flags().
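
For context, a hedged caller-side sketch (the helper name is hypothetical and not part of this patch; alloc_pages() and GFP_ATOMIC are the real kernel APIs): a non-sleeping request with order > 0, such as the one below, is what gfp_to_alloc_flags() now tags with ALLOC_HIGHATOMIC.

	#include <linux/gfp.h>

	/*
	 * Hypothetical caller for illustration only: an atomic
	 * (non-sleeping) order-2 allocation. After this patch, the
	 * allocator slow path sets ALLOC_HIGHATOMIC for it because the
	 * context is atomic and order > 0, which permits dipping into
	 * the MIGRATE_HIGHATOMIC reserve.
	 */
	static struct page *grab_atomic_buffer(void)
	{
		return alloc_pages(GFP_ATOMIC, 2);	/* order-2, may not sleep */
	}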

Link: https://lkml.kernel.org/r/20230113111217.14134-4-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: NeilBrown <neilb@suse.de>
Cc: Thierry Reding <thierry.reding@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c988dcbe
mm/internal.h +1 −0
@@ -789,6 +789,7 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
 #else
 #define ALLOC_NOFRAGMENT	  0x0
 #endif
+#define ALLOC_HIGHATOMIC	0x200 /* Allows access to MIGRATE_HIGHATOMIC */
 #define ALLOC_KSWAPD		0x800 /* allow waking of kswapd, __GFP_KSWAPD_RECLAIM set */
 
 enum ttu_flags;
mm/page_alloc.c +23 −6
@@ -3724,10 +3724,20 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 		 * reserved for high-order atomic allocation, so order-0
 		 * request should skip it.
 		 */
-		if (order > 0 && alloc_flags & ALLOC_HARDER)
+		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
 			page = __rmqueue(zone, order, migratetype, alloc_flags);
+
+			/*
+			 * If the allocation fails, allow OOM handling access
+			 * to HIGHATOMIC reserves as failing now is worse than
+			 * failing a high-order atomic allocation in the
+			 * future.
+			 */
+			if (!page && (alloc_flags & ALLOC_OOM))
+				page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
+
 			if (!page) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return NULL;
@@ -4041,9 +4051,11 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			return true;
 		}
 #endif
-		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
+		if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) &&
+		    !free_area_empty(area, MIGRATE_HIGHATOMIC)) {
 			return true;
+		}
 	}
 	return false;
 }

@@ -4304,7 +4316,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 			 * If this is a high-order atomic allocation then check
 			 * if the pageblock should be reserved for the future
 			 */
-			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
+			if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
 				reserve_highatomic_pageblock(page, zone, order);
 
 			return page;
@@ -4831,7 +4843,7 @@ static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
 }
 
 static inline unsigned int
-gfp_to_alloc_flags(gfp_t gfp_mask)
+gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
 {
 	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 
@@ -4857,8 +4869,13 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
 		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
 		 * if it can't schedule.
 		 */
-		if (!(gfp_mask & __GFP_NOMEMALLOC))
+		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
 			alloc_flags |= ALLOC_HARDER;
+
+			if (order > 0)
+				alloc_flags |= ALLOC_HIGHATOMIC;
+		}
+
 		/*
 		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
 		 * comment for __cpuset_node_allowed().
@@ -5066,7 +5083,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * kswapd needs to be woken up, and to avoid the cost of setting up
 	 * alloc_flags precisely. So we do that now.
 	 */
-	alloc_flags = gfp_to_alloc_flags(gfp_mask);
+	alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
 
 	/*
 	 * We need to recalculate the starting point for the zonelist iterator