Commit d9d9cd37 authored by Zhen Lei's avatar Zhen Lei
Browse files

sched/debug: Try trigger_single_cpu_backtrace(cpu) in dump_cpu_task()

mainline inclusion
from mainline-v6.1-rc1
commit e73dfe30
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7OIXK

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e73dfe30930b75c98746152e7a2f6a8ab6067b51



--------------------------------

The trigger_single_cpu_backtrace() function attempts to send an NMI to the
target CPU, which usually provides much better stack traces than the
dump_cpu_task() function's approach of dumping that stack from some other
CPU.  So much so that most calls to dump_cpu_task() only happen after
a call to trigger_single_cpu_backtrace() has failed.  And the exception to
this rule really should attempt to use trigger_single_cpu_backtrace() first.

Therefore, move the trigger_single_cpu_backtrace() invocation into
dump_cpu_task().

Signed-off-by: default avatarZhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: default avatarPaul E. McKenney <paulmck@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@redhat.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
Cc: Valentin Schneider <vschneid@redhat.com>
Conflicts:
	kernel/smp.c

Signed-off-by: default avatarZhen Lei <thunder.leizhen@huawei.com>
parent 6b32e038
Loading
Loading
Loading
Loading
+2 −3
Original line number Diff line number Diff line
@@ -336,7 +336,7 @@ static void rcu_dump_cpu_stacks(void)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
				else
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -473,7 +473,6 @@ static void rcu_check_gp_kthread_starvation(void)
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else  {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
				}
			}
+3 −0
Original line number Diff line number Diff line
@@ -10250,6 +10250,9 @@ struct cgroup_subsys cpu_cgrp_subsys = {

void dump_cpu_task(int cpu)
{
	/*
	 * First try an NMI-triggered backtrace on the target CPU itself;
	 * per this commit's rationale it usually yields a much better
	 * stack trace than dumping from another CPU.  On success we are
	 * done; only fall back to the remote dump below if it fails
	 * (e.g. the architecture lacks NMI backtrace support).
	 */
	if (trigger_single_cpu_backtrace(cpu))
		return;

	/* Fallback: dump the CPU's current task from this (other) CPU. */
	pr_info("Task dump for CPU %d:\n", cpu);
	sched_show_task(cpu_curr(cpu));
}
+1 −2
Original line number Diff line number Diff line
@@ -205,7 +205,6 @@ static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
		dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);