Commit 5a9e3474 authored by Vishal Moola (Oracle)'s avatar Vishal Moola (Oracle) Committed by Andrew Morton
Browse files

mm/swap: convert deactivate_page() to folio_deactivate()

Deactivate_page() has already been converted to use folios; this change
converts it to take in a folio argument instead of calling page_folio().
It also renames the function to folio_deactivate() to be more consistent
with other folio functions.

[akpm@linux-foundation.org: fix left-over comments, per Yu Zhao]
Link: https://lkml.kernel.org/r/20221221180848.20774-5-vishal.moola@gmail.com


Signed-off-by: default avatarVishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: default avatarMatthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: default avatarSeongJae Park <sj@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
parent f70da5ee
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -401,7 +401,7 @@ extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
extern void deactivate_page(struct page *page);
void folio_deactivate(struct folio *folio);
void folio_mark_lazyfree(struct folio *folio);
extern void swap_setup(void);

+1 −1
Original line number Diff line number Diff line
@@ -297,7 +297,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
		if (mark_accessed)
			folio_mark_accessed(folio);
		else
			deactivate_page(&folio->page);
			folio_deactivate(folio);
		folio_put(folio);
		applied += folio_nr_pages(folio);
	}
+2 −2
Original line number Diff line number Diff line
@@ -416,7 +416,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
					list_add(&folio->lru, &folio_list);
			}
		} else
			deactivate_page(&folio->page);
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
@@ -510,7 +510,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
					list_add(&folio->lru, &folio_list);
			}
		} else
			deactivate_page(&folio->page);
			folio_deactivate(folio);
	}

	arch_leave_lazy_mmu_mode();
+2 −2
Original line number Diff line number Diff line
@@ -2846,11 +2846,11 @@ bool folio_mark_dirty(struct folio *folio)

	if (likely(mapping)) {
		/*
		 * readahead/lru_deactivate_page could remain
		 * readahead/folio_deactivate could remain
		 * PG_readahead/PG_reclaim due to race with folio_end_writeback
		 * About readahead, if the folio is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the folio is redirtied,
		 * About folio_deactivate, if the folio is redirtied,
		 * the flag will be reset. So no problem. but if the
		 * folio is used by readahead it will confuse readahead
		 * and make it restart the size rampup process. But it's
+6 −8
Original line number Diff line number Diff line
@@ -733,17 +733,15 @@ void deactivate_file_folio(struct folio *folio)
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 * folio_deactivate - deactivate a folio
 * @folio: folio to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 * folio_deactivate() moves @folio to the inactive list if @folio was on the
 * active list and was not unevictable. This is done to accelerate the
 * reclaim of @folio.
 */
void deactivate_page(struct page *page)
void folio_deactivate(struct folio *folio)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
	    (folio_test_active(folio) || lru_gen_enabled())) {
		struct folio_batch *fbatch;
Loading