Commit d256d1cd authored by Tong Tiangen, committed by Andrew Morton

mm: memory-failure: use rcu lock instead of tasklist_lock when collect_procs()

We found a softlockup issue in our testing; analyzing the logs shows the
relevant CPUs' call traces as follows:

CPU0:
  _do_fork
    -> copy_process()
      -> write_lock_irq(&tasklist_lock)  // Disables irqs, waits for
                                         // tasklist_lock

CPU1:
  wp_page_copy()
    -> pte_offset_map_lock()
      -> spin_lock(&page->ptl)           // Holds page->ptl
    -> ptep_clear_flush()
      -> flush_tlb_others() ...
        -> smp_call_function_many()
          -> arch_send_call_function_ipi_mask()
            -> csd_lock_wait()           // Waits for other CPUs to
                                         // respond to the IPI

CPU2:
  collect_procs_anon()
    -> read_lock(&tasklist_lock)         // Holds tasklist_lock
      -> for_each_process(tsk)
        -> page_mapped_in_vma()
          -> page_vma_mapped_walk()
            -> map_pte()
              -> spin_lock(&page->ptl)   // Waits for page->ptl

We can see that CPU1 is waiting for CPU0 to respond to the IPI, CPU0 is
waiting for CPU2 to unlock tasklist_lock, and CPU2 is waiting for CPU1 to
unlock page->ptl. As a result, a softlockup is triggered.
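
Note that CPU0 can never answer CPU1's IPI while it spins, because
write_lock_irq() disables local interrupts before acquiring the lock.
Conceptually (a simplified sketch, not the literal rwlock implementation):

  /* write_lock_irq(&tasklist_lock), roughly: */
  local_irq_disable();                /* CPU0 stops servicing IPIs here */
  do_raw_write_lock(&tasklist_lock);  /* spins until CPU2's read_unlock() */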

collect_procs_anon() only iterates over the task list, and during that
iteration, thanks to call_rcu(), a task_struct object is freed only after
one or more RCU grace periods have elapsed. The logic is as follows:

release_task()
  -> __exit_signal()
    -> __unhash_process()
      -> list_del_rcu()

  -> put_task_struct_rcu_user()
    -> call_rcu(&task->rcu, delayed_put_task_struct)

delayed_put_task_struct()
  -> put_task_struct()
    -> if (refcount_sub_and_test())
         __put_task_struct()
           -> free_task()

Therefore, under the protection of the RCU read lock, we can use
get_task_struct() to take a stable reference to a task_struct during the
iteration.
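
Concretely, the iteration then takes the following shape (a simplified
sketch modeled on collect_procs_anon() in the diff below; the actual
get_task_struct() reference is taken inside the add_to_kill_anon_file()
helper):

	rcu_read_lock();
	for_each_process(tsk) {
		struct task_struct *t = task_early_kill(tsk, force_early);

		if (!t)
			continue;
		/*
		 * add_to_kill_anon_file() pins the task with
		 * get_task_struct(), so the reference remains valid
		 * after rcu_read_unlock().
		 */
		add_to_kill_anon_file(t, page, vma, to_kill);
	}
	rcu_read_unlock();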

By removing the use of tasklist_lock from the task list iteration, we break
the lock dependency chain above and avoid the softlockup.

The same logic can also be applied to:
 - collect_procs_file()
 - collect_procs_fsdax()
 - collect_procs_ksm()

Link: https://lkml.kernel.org/r/20230828022527.241693-1-tongtiangen@huawei.com
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2562d67b
mm/filemap.c: +0 −3
@@ -121,9 +121,6 @@
  *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
  *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
  *    ->private_lock		(zap_pte_range->block_dirty_folio)
- *
- * ->i_mmap_rwsem
- *   ->tasklist_lock            (memory_failure, collect_procs_ao)
  */
 
 static void page_cache_delete(struct address_space *mapping,
mm/ksm.c: +2 −2
@@ -2925,7 +2925,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
 		struct anon_vma *av = rmap_item->anon_vma;
 
 		anon_vma_lock_read(av);
-		read_lock(&tasklist_lock);
+		rcu_read_lock();
 		for_each_process(tsk) {
 			struct anon_vma_chain *vmac;
 			unsigned long addr;
@@ -2944,7 +2944,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
 				}
 			}
 		}
-		read_unlock(&tasklist_lock);
+		rcu_read_unlock();
 		anon_vma_unlock_read(av);
 	}
 }
mm/memory-failure.c: +8 −8
@@ -547,8 +547,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
  * on behalf of the thread group. Return task_struct of the (first found)
  * dedicated thread if found, and return NULL otherwise.
  *
- * We already hold read_lock(&tasklist_lock) in the caller, so we don't
- * have to call rcu_read_lock/unlock() in this function.
+ * We already hold rcu lock in the caller, so we don't have to call
+ * rcu_read_lock/unlock() in this function.
  */
 static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
 {
@@ -609,7 +609,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 		return;
 
 	pgoff = page_to_pgoff(page);
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	for_each_process(tsk) {
 		struct anon_vma_chain *vmac;
 		struct task_struct *t = task_early_kill(tsk, force_early);
@@ -626,7 +626,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 			add_to_kill_anon_file(t, page, vma, to_kill);
 		}
 	}
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	anon_vma_unlock_read(av);
 }
 
@@ -642,7 +642,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	pgoff_t pgoff;
 
 	i_mmap_lock_read(mapping);
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	pgoff = page_to_pgoff(page);
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, force_early);
@@ -662,7 +662,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 				add_to_kill_anon_file(t, page, vma, to_kill);
 		}
 	}
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	i_mmap_unlock_read(mapping);
 }
 
@@ -685,7 +685,7 @@ static void collect_procs_fsdax(struct page *page,
 	struct task_struct *tsk;
 
 	i_mmap_lock_read(mapping);
-	read_lock(&tasklist_lock);
+	rcu_read_lock();
 	for_each_process(tsk) {
 		struct task_struct *t = task_early_kill(tsk, true);
 
@@ -696,7 +696,7 @@ static void collect_procs_fsdax(struct page *page,
 				add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
 		}
 	}
-	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 	i_mmap_unlock_read(mapping);
 }
 #endif /* CONFIG_FS_DAX */