Unverified Commit c780cd23 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3913 arm64: Add non nmi ipi backtrace support

Merge Pull Request from: @ci-robot 
 
PR sync from: Liao Chen <liaochen4@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/WGZBCM36SJXXK2XJ24NHH2RACVWUOF7M/ 
From: linruizhe <linruizhe@huawei.com>

Dependencies:
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WATCHDOG=y

Li Zhengyu (1):
  arm64: Add non nmi ipi backtrace support


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I8PRY6 
 
Link:https://gitee.com/openeuler/kernel/pulls/3913

 

Reviewed-by: default avatarLiu Chao <liuchao173@huawei.com>
Reviewed-by: default avatarZhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents 5b44083b 5e3e94b1
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -2242,6 +2242,14 @@ config ARM64_DEBUG_PRIORITY_MASKING
	  If unsure, say N
endif # ARM64_PSEUDO_NMI

config NON_NMI_IPI_BACKTRACE
	bool "Support non-NMI IPI backtrace"
	depends on ARM64_PSEUDO_NMI
	default n
	help
	  Fall back to a regular (maskable) IPI for triggering remote CPU
	  backtraces when the pseudo-NMI IPI is unavailable, so that e.g.
	  the hung-task detector can still print the stack traces of other
	  CPUs. Note that a CPU stuck with interrupts disabled cannot
	  service a maskable IPI, so its backtrace may still be missing.

	  If unsure, say N.

config RELOCATABLE
	bool "Build a relocatable kernel image" if EXPERT
	select ARCH_HAS_RELR
+1 −0
Original line number Diff line number Diff line
@@ -539,6 +539,7 @@ CONFIG_ARM64_SVE=y
CONFIG_ARM64_SME=y
CONFIG_ARM64_PSEUDO_NMI=y
# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set
CONFIG_NON_NMI_IPI_BACKTRACE=y
CONFIG_RELOCATABLE=y
CONFIG_RANDOMIZE_BASE=y
CONFIG_KASLR_SKIP_MEM_RANGE=y
+35 −4
Original line number Diff line number Diff line
@@ -33,12 +33,43 @@ void arm64_send_nmi(cpumask_t *mask)
	__ipi_send_mask(ipi_nmi_desc, mask);
}

#ifdef CONFIG_NON_NMI_IPI_BACKTRACE
/*
 * IPI handler executed on each target CPU: dump this CPU's backtrace.
 *
 * nmi_cpu_backtrace() prints via printk; wrap it in the printk-safe
 * section so printing from IPI context cannot deadlock on printk's
 * internal locks.
 *
 * NOTE(review): the diff residue from the removed
 * arch_trigger_cpumask_backtrace() ("if (!ipi_nmi_desc) return false;")
 * has been dropped here — a void handler cannot return a value, and the
 * ipi_nmi_desc check belongs in the trigger path, not the handler.
 */
static void ipi_cpu_backtrace(void *info)
{
	__printk_safe_enter();
	nmi_cpu_backtrace(get_irq_regs());
	__printk_safe_exit();
}

/*
 * One call_single_data slot per CPU so concurrent backtrace requests to
 * different CPUs never share a csd.
 */
static DEFINE_PER_CPU(call_single_data_t, cpu_backtrace_csd) =
	CSD_INIT(ipi_cpu_backtrace, NULL);

/*
 * Raise a regular (maskable) IPI on every CPU in @mask except the
 * calling CPU; each target dumps its own backtrace from
 * ipi_cpu_backtrace(). Used as the fallback raise function for
 * nmi_trigger_cpumask_backtrace() when no pseudo-NMI IPI is available.
 *
 * Limitation: a hung CPU that has interrupts masked will never service
 * this IPI, so its backtrace cannot be captured this way.
 */
static void arm64_send_ipi(cpumask_t *mask)
{
	call_single_data_t *csd;
	int this_cpu = raw_smp_processor_id();
	int cpu;
	int ret;

	/*
	 * Honour the caller-supplied mask instead of blasting every
	 * online CPU: nmi_trigger_cpumask_backtrace() has already
	 * cleared any CPU the caller asked to exclude, and iterating
	 * all online CPUs would ignore both the mask and exclude_cpu.
	 */
	for_each_cpu(cpu, mask) {
		/* The local CPU is handled by the caller, not via IPI. */
		if (cpu == this_cpu)
			continue;
		csd = &per_cpu(cpu_backtrace_csd, cpu);
		/* Async send: don't block waiting for a possibly-hung CPU. */
		ret = smp_call_function_single_async(cpu, csd);
		if (ret)
			pr_info("Sending IPI failed to CPU %d\n", cpu);
	}
}
#endif

bool arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	if (ipi_nmi_desc)
		nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_send_nmi);
#ifdef CONFIG_NON_NMI_IPI_BACKTRACE
	else
		nmi_trigger_cpumask_backtrace(mask, exclude_cpu, arm64_send_ipi);
#endif

	return true;
}