Commit c583bcb8 authored by Paul E. McKenney

rcu: Don't invoke try_invoke_on_locked_down_task() with irqs disabled

The try_invoke_on_locked_down_task() function requires that
interrupts be enabled, but it is called with interrupts disabled from
rcu_print_task_stall(), resulting in an "IRQs not enabled as expected"
diagnostic.  This commit therefore updates rcu_print_task_stall()
to accumulate a list of the first few tasks while holding the current
leaf rcu_node structure's ->lock, then releases that lock and only then
uses try_invoke_on_locked_down_task() to attempt to obtain per-task
detailed information.  Of course, as soon as ->lock is released, the
task might exit, so the get_task_struct() function is used to prevent
the task structure from going away in the meantime.
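
A minimal userspace sketch of this two-phase pattern (snapshot a bounded, reference-counted prefix of a list under a spinlock, then do the slow per-task printing only after the lock is dropped). Everything here is a hypothetical stand-in, not kernel API: a pthread spinlock plays the role of the rcu_node structure's ->lock, and a hand-rolled refcount plays the role of get_task_struct()/put_task_struct().

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define SNAP_MAX 8			/* mirrors ARRAY_SIZE(ts) in the patch below */

struct task_ref {
	int pid;
	atomic_int refcount;		/* stand-in for the task_struct usage count */
	struct task_ref *next;
};

static pthread_spinlock_t list_lock;
static struct task_ref *blkd_list;	/* list guarded by list_lock */

static void get_ref(struct task_ref *t) { atomic_fetch_add(&t->refcount, 1); }
static void put_ref(struct task_ref *t) { atomic_fetch_sub(&t->refcount, 1); }

/*
 * Slow per-task work that must not run under the spinlock, playing the
 * role of try_invoke_on_locked_down_task(), which needs irqs enabled.
 */
static void slow_inspect(struct task_ref *t)
{
	printf(" P%d", t->pid);
}

static void print_stalled_tasks(void)
{
	struct task_ref *snap[SNAP_MAX];
	struct task_ref *t;
	int i = 0;

	/* Phase 1: snapshot the first few list entries under the lock. */
	pthread_spin_lock(&list_lock);
	for (t = blkd_list; t && i < SNAP_MAX; t = t->next) {
		get_ref(t);		/* pin: the task may exit after unlock */
		snap[i++] = t;
	}
	pthread_spin_unlock(&list_lock);

	/* Phase 2: do the slow work without the lock, then unpin. */
	while (i) {
		t = snap[--i];
		slow_inspect(t);
		put_ref(t);
	}
	printf("\n");
}

int main(void)
{
	struct task_ref a = { .pid = 42, .refcount = 1, .next = NULL };
	struct task_ref b = { .pid = 7, .refcount = 1, .next = &a };

	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	blkd_list = &b;
	print_stalled_tasks();		/* prints " P42 P7" */
	return 0;
}

Bounding the snapshot to eight entries keeps both the stack footprint and the lock hold time small, which is why the message above says "the first few tasks" rather than all of them.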

Link: https://lore.kernel.org/lkml/000000000000903d5805ab908fc4@google.com/

Fixes: 5bef8da6 ("rcu: Add per-task state to RCU CPU stall warnings")
Reported-by: syzbot+cb3b69ae80afd6535b0e@syzkaller.appspotmail.com
Reported-by: syzbot+f04854e1c5c9e913cc27@syzkaller.appspotmail.com
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 3650b228
+17 −5
@@ -249,13 +249,16 @@ static bool check_slow_task(struct task_struct *t, void *arg)
 
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
- * sections, printing out the tid of each.
+ * sections, printing out the tid of each of the first few of them.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+	__releases(rnp->lock)
 {
+	int i = 0;
 	int ndetected = 0;
 	struct rcu_stall_chk_rdr rscr;
 	struct task_struct *t;
+	struct task_struct *ts[8];
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp))
 		return 0;
@@ -264,6 +267,14 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	t = list_entry(rnp->gp_tasks->prev,
 		       struct task_struct, rcu_node_entry);
 	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+		get_task_struct(t);
+		ts[i++] = t;
+		if (i >= ARRAY_SIZE(ts))
+			break;
+	}
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	for (i--; i; i--) {
+		t = ts[i];
 		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
 			pr_cont(" P%d", t->pid);
 		else
@@ -273,6 +284,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 				".q"[rscr.rs.b.need_qs],
 				".e"[rscr.rs.b.exp_hint],
 				".l"[rscr.on_blkd_list]);
+		put_task_struct(t);
 		ndetected++;
 	}
 	pr_cont("\n");
@@ -293,8 +305,9 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static int rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	return 0;
 }
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -472,7 +485,6 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
 	rcu_for_each_leaf_node(rnp) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
-		ndetected += rcu_print_task_stall(rnp);
 		if (rnp->qsmask != 0) {
 			for_each_leaf_node_possible_cpu(rnp, cpu)
 				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
@@ -480,7 +492,7 @@ static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
 					ndetected++;
 				}
 		}
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
 	}
 
 	for_each_possible_cpu(cpu)
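
The patch also inverts the locking contract: print_other_cpu_stall() still acquires the leaf rcu_node structure's ->lock, but rcu_print_task_stall() is now responsible for releasing it, which the new __releases(rnp->lock) annotation lets sparse check. A minimal sketch of such a lock hand-off, assuming a userspace mutex and the sparse-style context() attribute; this is a hypothetical illustration, not kernel code:

#include <pthread.h>

/* Under sparse (__CHECKER__), the kernel defines __releases() in terms of
 * the context() attribute; for an ordinary compile it expands to nothing. */
#ifdef __CHECKER__
#define __releases(x)	__attribute__((context(x, 1, 0)))
#else
#define __releases(x)
#endif

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int ndetected;

/* Callee: entered with demo_lock held, returns with it released. */
static int report_and_unlock(void) __releases(demo_lock)
{
	int n = ndetected;	/* read shared state while still protected */

	pthread_mutex_unlock(&demo_lock);
	/* Slow work that must not run under the lock would go here. */
	return n;
}

/* Caller: takes the lock, then hands ownership to the callee. */
static int caller(void)
{
	pthread_mutex_lock(&demo_lock);
	ndetected++;
	return report_and_unlock();	/* releases demo_lock */
}

int main(void)
{
	return caller() == 1 ? 0 : 1;	/* first call observes ndetected == 1 */
}

Handing the unlock to the callee is what lets the slow try_invoke_on_locked_down_task() calls run with interrupts enabled while the ->blkd_tasks list is still scanned consistently under the lock.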