Commit ab130f91 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds
Browse files

mm: rename page_order() to buddy_order()



The current page_order() can only be called on pages in the buddy
allocator.  For compound pages, you have to use compound_order().  This is
confusing and led to a bug, so rename page_order() to buddy_order().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20201001152259.14932-2-willy@infradead.org


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1f0f8c0d
Loading
Loading
Loading
Loading
+3 −3
Original line number Original line Diff line number Diff line
@@ -625,7 +625,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
		}
		}


		/* Found a free page, will break it into order-0 pages */
		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
		if (!isolated)
			break;
			break;
@@ -898,7 +898,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
		 * potential isolation targets.
		 * potential isolation targets.
		 */
		 */
		if (PageBuddy(page)) {
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);
			unsigned long freepage_order = buddy_order_unsafe(page);


			/*
			/*
			 * Without lock, we cannot be sure that what we got is
			 * Without lock, we cannot be sure that what we got is
@@ -1172,7 +1172,7 @@ static bool suitable_migration_target(struct compact_control *cc,
		 * the only small danger is that we skip a potentially suitable
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 * pageblock, so it's not worth to check order for valid range.
		 */
		 */
		if (page_order_unsafe(page) >= pageblock_order)
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
			return false;
	}
	}


+4 −4
Original line number Original line Diff line number Diff line
@@ -270,16 +270,16 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 * page from being allocated in parallel and returning garbage as the order.
 * page from being allocated in parallel and returning garbage as the order.
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * page cannot be allocated or merged in parallel. Alternatively, it must
 * handle invalid values gracefully, and use page_order_unsafe() below.
 * handle invalid values gracefully, and use buddy_order_unsafe() below.
 */
 */
static inline unsigned int page_order(struct page *page)
static inline unsigned int buddy_order(struct page *page)
{
{
	/* PageBuddy() must be checked by the caller */
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
	return page_private(page);
}
}


/*
/*
 * Like page_order(), but for callers who cannot afford to hold the zone lock.
 * Like buddy_order(), but for callers who cannot afford to hold the zone lock.
 * PageBuddy() should be checked first by the caller to minimize race window,
 * PageBuddy() should be checked first by the caller to minimize race window,
 * and invalid values must be handled gracefully.
 * and invalid values must be handled gracefully.
 *
 *
@@ -289,7 +289,7 @@ static inline unsigned int page_order(struct page *page)
 * times, potentially observing different values in the tests and the actual
 * times, potentially observing different values in the tests and the actual
 * use of the result.
 * use of the result.
 */
 */
#define page_order_unsafe(page)		READ_ONCE(page_private(page))
#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))


static inline bool is_cow_mapping(vm_flags_t flags)
static inline bool is_cow_mapping(vm_flags_t flags)
{
{
+15 −15
Original line number Original line Diff line number Diff line
@@ -792,7 +792,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
				unsigned int order, int migratetype) {}
#endif
#endif


static inline void set_page_order(struct page *page, unsigned int order)
static inline void set_buddy_order(struct page *page, unsigned int order)
{
{
	set_page_private(page, order);
	set_page_private(page, order);
	__SetPageBuddy(page);
	__SetPageBuddy(page);
@@ -817,7 +817,7 @@ static inline bool page_is_buddy(struct page *page, struct page *buddy,
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;
		return false;


	if (page_order(buddy) != order)
	if (buddy_order(buddy) != order)
		return false;
		return false;


	/*
	/*
@@ -1059,7 +1059,7 @@ static inline void __free_one_page(struct page *page,
	}
	}


done_merging:
done_merging:
	set_page_order(page, order);
	set_buddy_order(page, order);


	if (fpi_flags & FPI_TO_TAIL)
	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
		to_tail = true;
@@ -2178,7 +2178,7 @@ static inline void expand(struct zone *zone, struct page *page,
			continue;
			continue;


		add_to_free_list(&page[size], zone, high, migratetype);
		add_to_free_list(&page[size], zone, high, migratetype);
		set_page_order(&page[size], high);
		set_buddy_order(&page[size], high);
	}
	}
}
}


@@ -2392,7 +2392,7 @@ static int move_freepages(struct zone *zone,
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);


		order = page_order(page);
		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		move_to_free_list(page, zone, order, migratetype);
		page += 1 << order;
		page += 1 << order;
		pages_moved += 1 << order;
		pages_moved += 1 << order;
@@ -2516,7 +2516,7 @@ static inline void boost_watermark(struct zone *zone)
static void steal_suitable_fallback(struct zone *zone, struct page *page,
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
		unsigned int alloc_flags, int start_type, bool whole_block)
{
{
	unsigned int current_order = page_order(page);
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int free_pages, movable_pages, alike_pages;
	int old_block_type;
	int old_block_type;


@@ -8344,7 +8344,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
		 */
		 */
		if (!page_ref_count(page)) {
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
				iter += (1 << buddy_order(page)) - 1;
			continue;
			continue;
		}
		}


@@ -8557,7 +8557,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
	}
	}


	if (outer_start != start) {
	if (outer_start != start) {
		order = page_order(pfn_to_page(outer_start));
		order = buddy_order(pfn_to_page(outer_start));


		/*
		/*
		 * outer_start page could be small order buddy page and
		 * outer_start page could be small order buddy page and
@@ -8782,7 +8782,7 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)


		BUG_ON(page_count(page));
		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
		pfn += (1 << order);
	}
	}
@@ -8801,7 +8801,7 @@ bool is_free_buddy_page(struct page *page)
	for (order = 0; order < MAX_ORDER; order++) {
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		struct page *page_head = page - (pfn & ((1 << order) - 1));


		if (PageBuddy(page_head) && page_order(page_head) >= order)
		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
			break;
			break;
	}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	spin_unlock_irqrestore(&zone->lock, flags);
@@ -8838,7 +8838,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,


		if (current_buddy != target) {
		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_page_order(current_buddy, high);
			set_buddy_order(current_buddy, high);
			page = next_page;
			page = next_page;
		}
		}
	}
	}
@@ -8858,16 +8858,16 @@ bool take_page_off_buddy(struct page *page)
	spin_lock_irqsave(&zone->lock, flags);
	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int buddy_order = page_order(page_head);
		int page_order = buddy_order(page_head);


		if (PageBuddy(page_head) && buddy_order >= order) {
		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);
								   pfn_head);


			del_page_from_free_list(page_head, zone, buddy_order);
			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
			break_down_buddy_pages(zone, page_head, page, 0,
						buddy_order, migratetype);
						page_order, migratetype);
			ret = true;
			ret = true;
			break;
			break;
		}
		}
+2 −2
Original line number Original line Diff line number Diff line
@@ -88,7 +88,7 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
	 * these pages to be merged.
	 * these pages to be merged.
	 */
	 */
	if (PageBuddy(page)) {
	if (PageBuddy(page)) {
		order = page_order(page);
		order = buddy_order(page);
		if (order >= pageblock_order) {
		if (order >= pageblock_order) {
			pfn = page_to_pfn(page);
			pfn = page_to_pfn(page);
			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy_pfn = __find_buddy_pfn(pfn, order);
@@ -261,7 +261,7 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * the correct MIGRATE_ISOLATE freelist. There is no
			 * simple way to verify that as VM_BUG_ON(), though.
			 * simple way to verify that as VM_BUG_ON(), though.
			 */
			 */
			pfn += 1 << page_order(page);
			pfn += 1 << buddy_order(page);
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
		else if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			/* A HWPoisoned page cannot be also PageBuddy */
			/* A HWPoisoned page cannot be also PageBuddy */
			pfn++;
			pfn++;
+3 −3
Original line number Original line Diff line number Diff line
@@ -295,7 +295,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
			if (PageBuddy(page)) {
			if (PageBuddy(page)) {
				unsigned long freepage_order;
				unsigned long freepage_order;


				freepage_order = page_order_unsafe(page);
				freepage_order = buddy_order_unsafe(page);
				if (freepage_order < MAX_ORDER)
				if (freepage_order < MAX_ORDER)
					pfn += (1UL << freepage_order) - 1;
					pfn += (1UL << freepage_order) - 1;
				continue;
				continue;
@@ -490,7 +490,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)


		page = pfn_to_page(pfn);
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);
			unsigned long freepage_order = buddy_order_unsafe(page);


			if (freepage_order < MAX_ORDER)
			if (freepage_order < MAX_ORDER)
				pfn += (1UL << freepage_order) - 1;
				pfn += (1UL << freepage_order) - 1;
@@ -584,7 +584,7 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
			 * heavy lock contention.
			 * heavy lock contention.
			 */
			 */
			if (PageBuddy(page)) {
			if (PageBuddy(page)) {
				unsigned long order = page_order_unsafe(page);
				unsigned long order = buddy_order_unsafe(page);


				if (order > 0 && order < MAX_ORDER)
				if (order > 0 && order < MAX_ORDER)
					pfn += (1UL << order) - 1;
					pfn += (1UL << order) - 1;
Loading