Commit 4aed23a2 authored by Matthew Wilcox (Oracle)
Browse files

mm/page_idle: Convert page_idle_clear_pte_refs() to use a folio



The PG_idle and PG_young bits are ignored if they're set on tail
pages, so ensure we're passing a folio around.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 2aff7a47
Loading
Loading
Loading
Loading
+12 −9
Original line number Diff line number Diff line
@@ -13,6 +13,8 @@
#include <linux/page_ext.h>
#include <linux/page_idle.h>

#include "internal.h"

#define BITMAP_CHUNK_SIZE	sizeof(u64)
#define BITMAP_CHUNK_BITS	(BITMAP_CHUNK_SIZE * BITS_PER_BYTE)

@@ -48,7 +50,8 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
					struct vm_area_struct *vma,
					unsigned long addr, void *arg)
{
	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
	struct folio *folio = page_folio(page);
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool referenced = false;

	while (page_vma_mapped_walk(&pvmw)) {
@@ -70,19 +73,20 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
	}

	if (referenced) {
		clear_page_idle(page);
		folio_clear_idle(folio);
		/*
		 * We cleared the referenced bit in a mapping to this page. To
		 * avoid interference with page reclaim, mark it young so that
		 * page_referenced() will return > 0.
		 */
		set_page_young(page);
		folio_set_young(folio);
	}
	return true;
}

static void page_idle_clear_pte_refs(struct page *page)
{
	struct folio *folio = page_folio(page);
	/*
	 * Since rwc.arg is unused, rwc is effectively immutable, so we
	 * can make it static const to save some cycles and stack.
@@ -93,18 +97,17 @@ static void page_idle_clear_pte_refs(struct page *page)
	};
	bool need_lock;

	if (!page_mapped(page) ||
	    !page_rmapping(page))
	if (!folio_mapped(folio) || !folio_raw_mapping(folio))
		return;

	need_lock = !PageAnon(page) || PageKsm(page);
	if (need_lock && !trylock_page(page))
	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(page, (struct rmap_walk_control *)&rwc);
	rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);

	if (need_lock)
		unlock_page(page);
		folio_unlock(folio);
}

static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,