Commit c7c3dec1 authored by Johannes Weiner, committed by Andrew Morton

mm: rmap: remove lock_page_memcg()

The previous patch made sure charge moving only touches pages for which
page_mapped() is stable.  lock_page_memcg() is no longer needed.
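
Concretely, every mapcount update in mm/rmap.c was bracketed by the memcg
lock so that charge moving could not retarget the page's memcg between the
_mapcount transition and the per-memcg counter update. Below is a simplified
composite of the pattern the hunks in this patch delete (illustration only,
not verbatim kernel code; THP accounting and the KSM special case are
omitted):

	/* before this patch (simplified composite, for illustration) */
	lock_page_memcg(page);		/* stabilize the page->memcg binding */
	first = atomic_inc_and_test(&page->_mapcount);
	if (nr)
		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
	unlock_page_memcg(page);	/* removable: the mover now only sees
					 * pages with stable page_mapped() */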

Link: https://lkml.kernel.org/r/20221206171340.139790-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4e0cf05f
mm/rmap.c: +8 −18

--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1222,9 +1222,6 @@ void page_add_anon_rmap(struct page *page,
 	bool compound = flags & RMAP_COMPOUND;
 	bool first = true;
 
-	if (unlikely(PageKsm(page)))
-		lock_page_memcg(page);
-
 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
 		first = atomic_inc_and_test(&page->_mapcount);
@@ -1262,15 +1259,14 @@ void page_add_anon_rmap(struct page *page,
 	if (nr)
 		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
 
-	if (unlikely(PageKsm(page)))
-		unlock_page_memcg(page);
-
-	/* address might be in next vma when migration races vma_adjust */
-	else if (first)
-		__page_set_anon_rmap(page, vma, address,
-				     !!(flags & RMAP_EXCLUSIVE));
-	else
-		__page_check_anon_rmap(page, vma, address);
+	if (likely(!PageKsm(page))) {
+		/* address might be in next vma when migration races vma_adjust */
+		if (first)
+			__page_set_anon_rmap(page, vma, address,
+					     !!(flags & RMAP_EXCLUSIVE));
+		else
+			__page_check_anon_rmap(page, vma, address);
+	}
 
 	mlock_vma_page(page, vma, compound);
 }
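
For readability, the resulting code in page_add_anon_rmap() after this hunk,
reconstructed from the + lines above: the PageKsm() test that used to guard
unlock_page_memcg() is inverted and now scopes the anon-rmap setup directly,
replacing the comment-split if/else-if it was entangled with:

	if (likely(!PageKsm(page))) {
		/* address might be in next vma when migration races vma_adjust */
		if (first)
			__page_set_anon_rmap(page, vma, address,
					     !!(flags & RMAP_EXCLUSIVE));
		else
			__page_check_anon_rmap(page, vma, address);
	}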
@@ -1329,7 +1325,6 @@ void page_add_file_rmap(struct page *page,
 	bool first;
 
 	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
-	lock_page_memcg(page);
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
 	if (likely(!compound)) {
@@ -1365,7 +1360,6 @@ void page_add_file_rmap(struct page *page,
 			NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
 	if (nr)
 		__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
-	unlock_page_memcg(page);
 
 	mlock_vma_page(page, vma, compound);
 }
@@ -1394,8 +1388,6 @@ void page_remove_rmap(struct page *page,
 		return;
 	}
 
-	lock_page_memcg(page);
-
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
 	if (likely(!compound)) {
 		last = atomic_add_negative(-1, &page->_mapcount);
@@ -1451,8 +1443,6 @@ void page_remove_rmap(struct page *page,
 	 * and remember that it's only reliable while mapped.
 	 */
 
-	unlock_page_memcg(page);
-
 	munlock_vma_page(page, vma, compound);
 }