include/linux/ksm.h +2 −2

```diff
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
 struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);

-void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);

 #else  /* !CONFIG_KSM */
@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
 	return page;
 }

-static inline void rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct folio *folio,
 			struct rmap_walk_control *rwc)
 {
 }
```
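For context: rmap_walk_ksm() is only reached through rmap_walk(), which after this series dispatches on the folio type. A rough sketch of the post-conversion dispatch in mm/rmap.c (not part of this diff). Under !CONFIG_KSM, folio_test_ksm() is compile-time false and the branch is dead code, which is why the empty stub above only has to match the new prototype:

```c
/* Sketch of the rmap_walk() dispatch in mm/rmap.c after this change. */
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
	if (unlikely(folio_test_ksm(folio)))
		rmap_walk_ksm(folio, rwc);	/* empty stub when !CONFIG_KSM */
	else if (folio_test_anon(folio))
		rmap_walk_anon(folio, rwc, false);
	else
		rmap_walk_file(folio, rwc, false);
}
```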
include/linux/rmap.h +5 −6

```diff
@@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
 /*
  * Called by memory-failure.c to kill processes.
  */
-struct anon_vma *page_lock_anon_vma_read(struct page *page);
 struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
 void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
@@ -286,15 +285,15 @@ struct rmap_walk_control {
 	 * Return false if page table scanning in rmap_walk should be stopped.
 	 * Otherwise, return true.
 	 */
-	bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+	bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
 					unsigned long addr, void *arg);
-	int (*done)(struct page *page);
-	struct anon_vma *(*anon_lock)(struct page *page);
+	int (*done)(struct folio *folio);
+	struct anon_vma *(*anon_lock)(struct folio *folio);
 	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };

-void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);

 #else	/* !CONFIG_MMU */
```
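To illustrate the new callback contract, here is a minimal, hypothetical user of the folio-typed rmap_walk_control. The names my_rmap_one and count_mappings are illustrative, not kernel functions, and the caller is assumed to hold whatever folio lock the walk requires:

```c
/* Hypothetical walker counting the VMAs a folio is mapped into. */
static bool my_rmap_one(struct folio *folio, struct vm_area_struct *vma,
			unsigned long addr, void *arg)
{
	int *nr_vmas = arg;

	(*nr_vmas)++;		/* folio is mapped in this vma at addr */
	return true;		/* true: keep scanning the remaining VMAs */
}

static int count_mappings(struct folio *folio)
{
	int nr_vmas = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = my_rmap_one,
		.arg = &nr_vmas,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);
	return nr_vmas;
}
```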
mm/damon/paddr.c +7 −8

```diff
@@ -16,10 +16,10 @@
 #include "../internal.h"
 #include "prmtv-common.h"

-static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
@@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
 	struct page *page = damon_get_page(PHYS_PFN(paddr));
 	struct rmap_walk_control rwc = {
 		.rmap_one = __damon_pa_mkold,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
@@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
 	if (need_lock && !folio_trylock(folio))
 		goto out;

-	rmap_walk(&folio->page, &rwc);
+	rmap_walk(folio, &rwc);

 	if (need_lock)
 		folio_unlock(folio);
@@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
 	bool accessed;
 };

-static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
+static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long addr, void *arg)
 {
-	struct folio *folio = page_folio(page);
 	struct damon_pa_access_chk_result *result = arg;
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	struct rmap_walk_control rwc = {
 		.arg = &result,
 		.rmap_one = __damon_pa_young,
-		.anon_lock = page_lock_anon_vma_read,
+		.anon_lock = folio_lock_anon_vma_read,
 	};
 	bool need_lock;
@@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 		return NULL;
 	}

-	rmap_walk(folio, &rwc);
+	rmap_walk(folio, &rwc);

 	if (need_lock)
 		folio_unlock(folio);
```
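Two simplifications fall out of the DAMON conversion: __damon_pa_young() no longer needs its own page_folio() translation because the walker now hands it the folio directly, and both callers pass the folio straight to rmap_walk() instead of reaching through &folio->page. For reference, DEFINE_FOLIO_VMA_WALK (declared in rmap.h, not shown in this diff) initialises the mapped-walk state from the folio; an approximate expansion, with the exact field set depending on the pvmw PFN conversion earlier in this series:

```c
/* Approximate expansion of DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 * sketch only -- field names follow the PFN-based pvmw layout. */
struct page_vma_mapped_walk pvmw = {
	.pfn = folio_pfn(folio),		/* first PFN of the folio */
	.nr_pages = folio_nr_pages(folio),	/* walk covers the whole folio */
	.pgoff = folio_pgoff(folio),
	.vma = vma,
	.address = addr,
	.flags = 0,
};
```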
mm/folio-compat.c +0 −7

```diff
@@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
 {
 	folio_putback_lru(page_folio(page));
 }
-
-#ifdef CONFIG_MMU
-struct anon_vma *page_lock_anon_vma_read(struct page *page)
-{
-	return folio_lock_anon_vma_read(page_folio(page));
-}
-#endif
```
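With the DAMON call sites converted above, this compatibility wrapper has no remaining in-tree users and can be dropped. Any leftover page-based caller (a hypothetical example below) would simply do the conversion explicitly at the call site:

```c
/* Hypothetical leftover caller: convert at the call site instead of
 * relying on the removed page_lock_anon_vma_read() wrapper. */
struct anon_vma *anon_vma = folio_lock_anon_vma_read(page_folio(page));
```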
mm/huge_memory.c +1 −1

```diff
@@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	 * The caller does not necessarily hold an mmap_lock that would
 	 * prevent the anon_vma disappearing so we first we take a
 	 * reference to it and then lock the anon_vma for write. This
-	 * is similar to page_lock_anon_vma_read except the write lock
+	 * is similar to folio_lock_anon_vma_read except the write lock
 	 * is taken to serialise against parallel split or collapse
 	 * operations.
 	 */
```
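This hunk only tracks the rename in a comment; the locking it documents is unchanged. A condensed, hypothetical sketch of the pattern the comment describes (simplified from split_huge_page_to_list(), with the split itself and error paths elided; "head" is the compound head page of the THP):

```c
/* Sketch: take a reference first so the anon_vma cannot disappear,
 * then lock it for write, unlike folio_lock_anon_vma_read() which
 * only takes the anon_vma rwsem for read. */
static int split_locked_example(struct page *head)
{
	struct anon_vma *anon_vma = page_get_anon_vma(head);

	if (!anon_vma)
		return -EBUSY;	/* anon_vma already gone */

	/* Write lock serialises against parallel split or collapse. */
	anon_vma_lock_write(anon_vma);

	/* ... the actual split would happen here ... */

	anon_vma_unlock_write(anon_vma);
	put_anon_vma(anon_vma);
	return 0;
}
```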