Commit 6c77b607 authored by Kefeng Wang, committed by Andrew Morton

mm: kill lock|unlock_page_memcg()

Since commit c7c3dec1 ("mm: rmap: remove lock_page_memcg()"), there are no
users left, so kill lock_page_memcg() and unlock_page_memcg().

Link: https://lkml.kernel.org/r/20230614143612.62575-1-wangkefeng.wang@huawei.com


Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 399fd496
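
For any remaining out-of-tree callers, the removed wrappers in the diff below
show the exact replacement: lock_page_memcg(page) simply forwarded to
folio_memcg_lock(page_folio(page)), and unlock_page_memcg() likewise. A
minimal conversion sketch (illustrative only, not part of this commit):

	/* Before this commit: page-based wrappers. */
	lock_page_memcg(page);
	/* ... touch memcg-tracked state for the page ... */
	unlock_page_memcg(page);

	/* After this commit: call the folio API the wrappers forwarded to. */
	struct folio *folio = page_folio(page);

	folio_memcg_lock(folio);
	/* ... touch memcg-tracked state for the folio ... */
	folio_memcg_unlock(folio);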
+1 −1
@@ -297,7 +297,7 @@ Lock order is as follows::

  Page lock (PG_locked bit of page->flags)
    mm->page_table_lock or split pte_lock
-      lock_page_memcg (memcg->move_lock)
+      folio_memcg_lock (memcg->move_lock)
        mapping->i_pages lock
          lruvec->lru_lock.

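To make the documented nesting concrete: a path that dirties a file folio
takes the folio (page) lock first, then folio_memcg_lock(), and only then the
mapping's i_pages lock. The helper below is a hypothetical sketch of that
pattern, not code from this commit:

	/* Hypothetical example: dirty accounting in the documented lock order. */
	static void example_account_dirty(struct address_space *mapping,
					  struct folio *folio)
	{
		unsigned long flags;

		/* Caller holds the folio (page) lock, the outermost level. */
		VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

		folio_memcg_lock(folio);			/* memcg->move_lock level */
		xa_lock_irqsave(&mapping->i_pages, flags);	/* mapping->i_pages lock level */
		/* ... update dirty state and statistics ... */
		xa_unlock_irqrestore(&mapping->i_pages, flags);
		folio_memcg_unlock(folio);
	}
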
+1 −11
@@ -419,7 +419,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
 *
 * - the folio lock
 * - LRU isolation
- * - lock_page_memcg()
+ * - folio_memcg_lock()
 * - exclusive reference
 * - mem_cgroup_trylock_pages()
 *
@@ -949,8 +949,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
-void lock_page_memcg(struct page *page);
-void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

@@ -1438,14 +1436,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

-static inline void lock_page_memcg(struct page *page)
-{
-}
-
-static inline void unlock_page_memcg(struct page *page)
-{
-}
-
static inline void folio_memcg_lock(struct folio *folio)
{
}
+1 −1
@@ -117,7 +117,7 @@
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
- *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
+ *    ->memcg->move_lock	(page_remove_rmap->folio_memcg_lock)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->block_dirty_folio)
+4 −14
@@ -2148,17 +2148,12 @@ void folio_memcg_lock(struct folio *folio)
	 * When charge migration first begins, we can have multiple
	 * critical sections holding the fast-path RCU lock and one
	 * holding the slowpath move_lock. Track the task who has the
-	 * move_lock for unlock_page_memcg().
+	 * move_lock for folio_memcg_unlock().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;
}

-void lock_page_memcg(struct page *page)
-{
-	folio_memcg_lock(page_folio(page));
-}
-
static void __folio_memcg_unlock(struct mem_cgroup *memcg)
{
	if (memcg && memcg->move_lock_task == current) {
@@ -2186,11 +2181,6 @@ void folio_memcg_unlock(struct folio *folio)
	__folio_memcg_unlock(folio_memcg(folio));
}

-void unlock_page_memcg(struct page *page)
-{
-	folio_memcg_unlock(page_folio(page));
-}
-
struct memcg_stock_pcp {
	local_lock_t stock_lock;
	struct mem_cgroup *cached; /* this never be root cgroup */
@@ -2866,7 +2856,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
	 *
	 * - the page lock
	 * - LRU isolation
-	 * - lock_page_memcg()
+	 * - folio_memcg_lock()
	 * - exclusive reference
	 * - mem_cgroup_trylock_pages()
	 */
@@ -5829,7 +5819,7 @@ static int mem_cgroup_move_account(struct page *page,
	 * with (un)charging, migration, LRU putback, or anything else
	 * that would rely on a stable page's memory cgroup.
	 *
-	 * Note that lock_page_memcg is a memcg lock, not a page lock,
+	 * Note that folio_memcg_lock is a memcg lock, not a page lock,
	 * to save space. As soon as we switch page's memory cgroup to a
	 * new memcg that isn't locked, the above state can change
	 * concurrently again. Make sure we're truly done with it.
@@ -6320,7 +6310,7 @@ static void mem_cgroup_move_charge(void)
{
	lru_add_drain_all();
	/*
-	 * Signal lock_page_memcg() to take the memcg's move_lock
+	 * Signal folio_memcg_lock() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
+3 −3
@@ -2597,7 +2597,7 @@ EXPORT_SYMBOL(noop_dirty_folio);
/*
 * Helper function for set_page_dirty family.
 *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
 *
 * NOTE: This relies on being atomic wrt interrupts.
 */
@@ -2631,7 +2631,7 @@ static void folio_account_dirtied(struct folio *folio,
/*
 * Helper function for deaccounting dirty page without writeback.
 *
- * Caller must hold lock_page_memcg().
+ * Caller must hold folio_memcg_lock().
 */
void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
{
@@ -2650,7 +2650,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 * If warn is true, then emit a warning if the folio is not uptodate and has
 * not been truncated.
 *
- * The caller must hold lock_page_memcg().  Most callers have the folio
+ * The caller must hold folio_memcg_lock().  Most callers have the folio
 * locked.  A few have the folio blocked from truncation through other
 * means (eg zap_vma_pages() has it mapped and is holding the page table
 * lock).  This can also be called from mark_buffer_dirty(), which I