Commit b3ac0413 authored by Matthew Wilcox (Oracle)

mm/rmap: Turn page_referenced() into folio_referenced()

Both its callers pass a page which was previously on an LRU list, so
they were passing a folio by definition.  Use the type system to
enforce that and remove a few calls to compound_head().
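
As a rough illustration (a sketch, not part of this commit; the wrapper
below is hypothetical), page_folio() is the folio counterpart of the
compound_head() lookup that the old page-based helpers did internally,
and for a page on an LRU list it is a pure type-level conversion:

	/* Hypothetical compat wrapper, for illustration only. */
	static inline int was_page_referenced(struct page *page, int is_locked,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
	{
		/* page_folio() subsumes the old compound_head() calls */
		return folio_referenced(page_folio(page), is_locked,
					memcg, vm_flags);
	}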

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent dcc5d337
include/linux/rmap.h (+2 −2)
@@ -190,7 +190,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked,
+int folio_referenced(struct folio *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
 void try_to_migrate(struct page *page, enum ttu_flags flags);
@@ -301,7 +301,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
 				  struct mem_cgroup *memcg,
 				  unsigned long *vm_flags)
 {
mm/page_idle.c (+1 −1)
@@ -77,7 +77,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
-		 * page_referenced() will return > 0.
+		 * folio_referenced() will return > 0.
 		 */
 		folio_set_young(folio);
 	}
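
The young flag set here is consumed by folio_referenced_one() in the
mm/rmap.c hunks below.  Roughly, the handshake is (sketch only, using
the folio idle/young helpers already visible in this diff):

	/* mm/page_idle.c side: pte reference bits were just cleared,
	 * so remember the access by marking the folio young. */
	folio_set_young(folio);

	/* mm/rmap.c side, in folio_referenced_one(): the young flag
	 * counts as one more reference and is cleared in the process. */
	if (folio_test_clear_young(folio))
		referenced++;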
mm/rmap.c (+35 −35)
@@ -789,29 +789,30 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 	return pmd;
 }
 
-struct page_referenced_arg {
+struct folio_referenced_arg {
 	int mapcount;
 	int referenced;
 	unsigned long vm_flags;
 	struct mem_cgroup *memcg;
 };
 /*
- * arg: page_referenced_arg will be passed
+ * arg: folio_referenced_arg will be passed
  */
-static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, void *arg)
 {
-	struct page_referenced_arg *pra = arg;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+	struct folio *folio = page_folio(page);
+	struct folio_referenced_arg *pra = arg;
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	int referenced = 0;
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		address = pvmw.address;
 
 		if ((vma->vm_flags & VM_LOCKED) &&
-		    (!PageTransCompound(page) || !pvmw.pte)) {
+		    (!folio_test_large(folio) || !pvmw.pte)) {
 			/* Restore the mlock which got missed */
-			mlock_vma_page(page, vma, !pvmw.pte);
+			mlock_vma_folio(folio, vma, !pvmw.pte);
 			page_vma_mapped_walk_done(&pvmw);
 			pra->vm_flags |= VM_LOCKED;
 			return false; /* To break the loop */
@@ -823,10 +824,10 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 				/*
 				 * Don't treat a reference through
 				 * a sequentially read mapping as such.
-				 * If the page has been used in another mapping,
+				 * If the folio has been used in another mapping,
 				 * we will catch it; if this other mapping is
 				 * already gone, the unmap path will have set
-				 * PG_referenced or activated the page.
+				 * the referenced flag or activated the folio.
 				 */
 				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 					referenced++;
@@ -836,7 +837,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 						pvmw.pmd))
 				referenced++;
 		} else {
-			/* unexpected pmd-mapped page? */
+			/* unexpected pmd-mapped folio? */
 			WARN_ON_ONCE(1);
 		}
 
@@ -844,8 +845,8 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	if (referenced)
-		clear_page_idle(page);
-	if (test_and_clear_page_young(page))
+		folio_clear_idle(folio);
+	if (folio_test_clear_young(folio))
 		referenced++;
 
 	if (referenced) {
@@ -859,9 +860,9 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	return true;
 }
 
-static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
+static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
 {
-	struct page_referenced_arg *pra = arg;
+	struct folio_referenced_arg *pra = arg;
 	struct mem_cgroup *memcg = pra->memcg;
 
 	if (!mm_match_cgroup(vma->vm_mm, memcg))
@@ -871,27 +872,26 @@ static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
 }
 
 /**
- * page_referenced - test if the page was referenced
- * @page: the page to test
- * @is_locked: caller holds lock on the page
+ * folio_referenced() - Test if the folio was referenced.
+ * @folio: The folio to test.
+ * @is_locked: Caller holds lock on the folio.
  * @memcg: target memory cgroup
- * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
+ * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
  *
- * Quick test_and_clear_referenced for all mappings to a page,
- * returns the number of ptes which referenced the page.
+ * Quick test_and_clear_referenced for all mappings of a folio,
+ *
+ * Return: The number of mappings which referenced the folio.
  */
-int page_referenced(struct page *page,
-		    int is_locked,
-		    struct mem_cgroup *memcg,
-		    unsigned long *vm_flags)
+int folio_referenced(struct folio *folio, int is_locked,
+		     struct mem_cgroup *memcg, unsigned long *vm_flags)
 {
 	int we_locked = 0;
-	struct page_referenced_arg pra = {
-		.mapcount = total_mapcount(page),
+	struct folio_referenced_arg pra = {
+		.mapcount = folio_mapcount(folio),
 		.memcg = memcg,
 	};
 	struct rmap_walk_control rwc = {
-		.rmap_one = page_referenced_one,
+		.rmap_one = folio_referenced_one,
 		.arg = (void *)&pra,
 		.anon_lock = page_lock_anon_vma_read,
 	};
@@ -900,11 +900,11 @@ int page_referenced(struct page *page,
 	if (!pra.mapcount)
 		return 0;
 
-	if (!page_rmapping(page))
+	if (!folio_raw_mapping(folio))
 		return 0;
 
-	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
-		we_locked = trylock_page(page);
+	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
+		we_locked = folio_trylock(folio);
 		if (!we_locked)
 			return 1;
 	}
@@ -915,14 +915,14 @@ int page_referenced(struct page *page,
 	 * cgroups
 	 */
 	if (memcg) {
-		rwc.invalid_vma = invalid_page_referenced_vma;
+		rwc.invalid_vma = invalid_folio_referenced_vma;
 	}
 
-	rmap_walk(page, &rwc);
+	rmap_walk(&folio->page, &rwc);
 	*vm_flags = pra.vm_flags;
 
 	if (we_locked)
-		unlock_page(page);
+		folio_unlock(folio);
 
 	return pra.referenced;
 }
@@ -1052,8 +1052,8 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	/*
 	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
-	 * simultaneously, so a concurrent reader (eg page_referenced()'s
-	 * PageAnon()) will not see one without the other.
+	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
+	 * folio_test_anon()) will not see one without the other.
 	 */
 	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
 }
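
For reference, a minimal caller-side sketch of the new interface (the
helper name is hypothetical; the real callers are in the mm/vmscan.c
hunks below):

	/* Hypothetical helper: has this LRU folio been referenced? */
	static bool folio_was_referenced(struct folio *folio,
					 struct mem_cgroup *memcg)
	{
		unsigned long vm_flags;

		/* 0: the caller does not already hold the folio lock */
		return folio_referenced(folio, 0, memcg, &vm_flags) > 0;
	}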
mm/vmscan.c (+12 −8)
@@ -1386,10 +1386,11 @@ enum page_references {
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	struct folio *folio = page_folio(page);
 	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
 
-	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
-					  &vm_flags);
+	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
+					   &vm_flags);
 	referenced_page = TestClearPageReferenced(page);
 
@@ -2490,7 +2491,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  *
  * If the pages are mostly unmapped, the processing is fast and it is
  * appropriate to hold lru_lock across the whole operation.  But if
- * the pages are mapped, the processing is slow (page_referenced()), so
+ * the pages are mapped, the processing is slow (folio_referenced()), so
  * we should drop lru_lock around each page.  It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
@@ -2510,7 +2511,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
-	struct page *page;
 	unsigned nr_deactivate, nr_activate;
 	unsigned nr_rotated = 0;
 	int file = is_file_lru(lru);
@@ -2532,9 +2532,13 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&lruvec->lru_lock);
 
 	while (!list_empty(&l_hold)) {
+		struct folio *folio;
+		struct page *page;
+
 		cond_resched();
-		page = lru_to_page(&l_hold);
-		list_del(&page->lru);
+		folio = lru_to_folio(&l_hold);
+		list_del(&folio->lru);
+		page = &folio->page;
 
 		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
@@ -2549,7 +2553,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			}
 		}
 
-		if (page_referenced(page, 0, sc->target_mem_cgroup,
-				    &vm_flags)) {
+		if (folio_referenced(folio, 0, sc->target_mem_cgroup,
+				     &vm_flags)) {
 			/*
 			 * Identify referenced, file-backed active pages and