Commit 3053668e authored by Jinjie Ruan

arm64: Introduce Xint software solution

hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/release-management/issues/IB6JLE



--------------------------------

Introduce the xint software solution for the kernel. It provides a
lightweight interrupt processing framework for latency-sensitive
interrupts, and can be enabled dynamically for each irq via the
/proc/irq/<irq>/xint interface.
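
For illustration only (the proc file itself is not part of this diff; the
accepted values "1"/"0" and the IRQ number below are assumptions), opting
one irq into the xint path from userspace might look like:

  /* Hypothetical userspace example: opt IRQ 45 into the xint fast path.
   * Assumes /proc/irq/<irq>/xint accepts "1" to enable and "0" to disable;
   * both the value format and the IRQ number are illustrative.
   */
  #include <stdio.h>

  int main(void)
  {
  	FILE *f = fopen("/proc/irq/45/xint", "w");

  	if (!f)
  		return 1;
  	fputs("1\n", f);	/* write "0" to fall back to the normal path */
  	return fclose(f) ? 1 : 0;
  }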

The main implementation schemes are as follows:
1. A small number of latency-sensitive interrupts can be configured into
   the xint state, so that they are handled by the xint framework instead
   of the kernel's generic interrupt framework, improving performance by
   removing unnecessary processing. It is not recommended to configure too
   many interrupts as xint, as this would affect system stability to some
   extent.
2. Since the hwirq numbers of SGI/PPI/SPI interrupts are consecutive and
   limited, use a bitmap to check whether a hwirq is an xint (see the
   sketch after this list).
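
A minimal sketch of the bitmap idea from item 2 (illustrative only, not
the code added by this patch; the names and the 1020 hwirq bound are
assumptions):

  #include <linux/bitmap.h>
  #include <linux/bitops.h>
  #include <linux/types.h>

  #define XINT_MAX_HWIRQ	1020	/* SGI/PPI/SPI hwirq space; bound is illustrative */

  static DECLARE_BITMAP(xint_hwirqs, XINT_MAX_HWIRQ);

  /* O(1) lookup on the interrupt path: is this hwirq handled as an xint? */
  static inline bool hwirq_is_xint(unsigned long hwirq)
  {
  	return hwirq < XINT_MAX_HWIRQ && test_bit(hwirq, xint_hwirqs);
  }

  /* Would be called from the /proc/irq/<irq>/xint write handler. */
  static inline void hwirq_set_xint(unsigned long hwirq, bool enable)
  {
  	if (hwirq >= XINT_MAX_HWIRQ)
  		return;

  	if (enable)
  		set_bit(hwirq, xint_hwirqs);
  	else
  		clear_bit(hwirq, xint_hwirqs);
  }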

Signed-off-by: Zhang Jianhua <chris.zjh@huawei.com>
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
parent cf5f2d9f
+23 −2
@@ -1549,9 +1549,30 @@ config FAST_SYSCALL
	  exception handling path that only considers necessary features
	  such as security, context saving, and recovery.

config ARCH_SUPPORTS_FAST_IRQ
	bool

config FAST_IRQ
	bool "Fast irq support"
	depends on ARCH_SUPPORTS_FAST_IRQ
	default n
	help
	  The irq handling process, which includes auxiliary
	  functions for debug/trace and core functions like
	  KPTI, interrupt time record, interrupt processing as
	  a random number source, interrupt affinity
	  modification and interrupt processing race, as well as
	  spurious and unhandled interrupt debugging, has been
	  identified as overly "lengthy".
	  To address this, we introduce the concept of fast irq,
	  a fast interrupt handling path that only considers
	  necessary features such as security, context saving
	  and recovery, which adds a lightweight interrupt processing
	  framework for latency-sensitive interrupts.

config DEBUG_FEATURE_BYPASS
	bool "Bypass debug feature in fast syscall"
	depends on FAST_SYSCALL
	depends on FAST_SYSCALL || FAST_IRQ
	default y
	help
	  This to bypass debug feature in fast syscall.
@@ -1563,7 +1584,7 @@ config DEBUG_FEATURE_BYPASS

config SECURITY_FEATURE_BYPASS
	bool "Bypass security feature in fast syscall"
	depends on FAST_SYSCALL
	depends on FAST_SYSCALL || FAST_IRQ
	default y
	help
	  This to bypass security feature in fast syscall.
+1 −0
@@ -263,6 +263,7 @@ config ARM64
	select TRACE_IRQFLAGS_NMI_SUPPORT
	select HAVE_SOFTIRQ_ON_OWN_STACK
	select ARCH_SUPPORTS_FAST_SYSCALL if !ARM64_MTE && !KASAN_HW_TAGS
	select ARCH_SUPPORTS_FAST_IRQ if ARM_GIC_V3 && !ARM64_MTE && !KASAN_HW_TAGS
	help
	  ARM 64-bit (AArch64) Linux support.

+2 −0
@@ -893,6 +893,8 @@ CONFIG_FUNCTION_ALIGNMENT_8B=y
CONFIG_FUNCTION_ALIGNMENT=8
CONFIG_ARCH_SUPPORTS_FAST_SYSCALL=y
# CONFIG_FAST_SYSCALL is not set
CONFIG_ARCH_SUPPORTS_FAST_IRQ=y
# CONFIG_FAST_IRQ is not set
# end of General architecture-dependent options

CONFIG_RT_MUTEXES=y
+26 −0
@@ -2395,6 +2395,24 @@ static bool has_xcall_support(const struct arm64_cpu_capabilities *entry, int __
}
#endif

#ifdef CONFIG_FAST_IRQ
bool is_xint_support;
/*
 * "xint" on the kernel command line opts the system into the xint fast
 * irq path; it only takes effect when the GICv3 CPU interface system
 * registers are available.
 */
static int __init xint_setup(char *str)
{
	if (!cpus_have_cap(ARM64_HAS_GIC_CPUIF_SYSREGS))
		return 1;

	is_xint_support = true;
	return 1;
}
__setup("xint", xint_setup);

static bool has_xint_support(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_xint_support;
}
#endif

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.capability = ARM64_ALWAYS_BOOT,
@@ -2919,6 +2937,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_xcall_support,
	},
#endif
#ifdef CONFIG_FAST_IRQ
	{
		.desc = "Xint Support",
		.capability = ARM64_HAS_XINT,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_xint_support,
	},
#endif
	{},
};
+113 −0
@@ -512,6 +512,119 @@ static __always_inline void __el1_pnmi(struct pt_regs *regs,
	arm64_exit_nmi(regs);
}

#ifdef CONFIG_FAST_IRQ
/*
 * Lightweight EL1 interrupt entry/exit: when DEBUG_FEATURE_BYPASS is
 * enabled, the full enter_from_kernel_mode()/exit_to_kernel_mode()
 * sequence is skipped and only xint_enter_rcu()/xint_exit_rcu() run.
 */
static __always_inline void __el1_xint(struct pt_regs *regs,
				       void (*handler)(struct pt_regs *))
{
#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	enter_from_kernel_mode(regs);
#endif

	xint_enter_rcu();
	do_interrupt_handler(regs, handler);
	xint_exit_rcu();

	arm64_preempt_schedule_irq();

#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	exit_to_kernel_mode(regs);
#endif
}

static void noinstr el1_xint(struct pt_regs *regs, u64 nmi_flag,
			     void (*handler)(struct pt_regs *),
			     void (*nmi_handler)(struct pt_regs *))
{
	/* Is there a NMI to handle? */
#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) {
		__el1_nmi(regs, nmi_handler);
		return;
	}
#endif

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		__el1_pnmi(regs, handler);
	else
		__el1_xint(regs, handler);
}

asmlinkage void noinstr el1h_64_xint_handler(struct pt_regs *regs)
{
	el1_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
}

static __always_inline void xint_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = read_thread_flags();
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);

#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	lockdep_sys_exit();
#endif
}

static __always_inline void xint_exit_to_user_mode(struct pt_regs *regs)
{
	xint_exit_to_user_mode_prepare(regs);
#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	mte_check_tfsr_exit();
	__exit_to_user_mode();
#endif
}

static void noinstr el0_xint(struct pt_regs *regs, u64 nmi_flag,
			     void (*handler)(struct pt_regs *),
			     void (*nmi_handler)(struct pt_regs *))
{
#ifndef CONFIG_DEBUG_FEATURE_BYPASS
	enter_from_user_mode(regs);

	/* Is there a NMI to handle? */
	if (system_uses_nmi() && (read_sysreg(isr_el1) & nmi_flag)) {
		/*
		 * Any system with FEAT_NMI should have FEAT_CSV2 and
		 * not be affected by Spectre v2 so we don't mitigate
		 * here.
		 */

		arm64_enter_nmi(regs);
		do_interrupt_handler(regs, nmi_handler);
		arm64_exit_nmi(regs);

		exit_to_user_mode(regs);
		return;
	}
#endif

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

#ifndef CONFIG_SECURITY_FEATURE_BYPASS
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();
#endif

	xint_enter_rcu();
	do_interrupt_handler(regs, handler);
	xint_exit_rcu();

	xint_exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_xint_handler(struct pt_regs *regs)
{
	el0_xint(regs, ISR_EL1_IS, handle_arch_irq, handle_arch_nmi_irq);
}
#endif /* CONFIG_FAST_IRQ */

static __always_inline void __el1_irq(struct pt_regs *regs,
				      void (*handler)(struct pt_regs *))
{
Loading