Commit 76f64f0a authored by Jinjie Ruan's avatar Jinjie Ruan
Browse files

Revert "powerpc: Avoid nmi_enter/nmi_exit in real mode interrupt."

hulk inclusion
category: bugfix
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAGPSI



--------------------------------

This reverts commit 247ff920.

Because CVE-2024-42126 has already been fixed in stable 5.10 by a
different upstream commit, this backport is unnecessary and is reverted.

Fixes: 247ff920 ("powerpc: Avoid nmi_enter/nmi_exit in real mode interrupt.")
Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
parent 6b96457d
Loading
Loading
Loading
Loading
+0 −10
Original line number Diff line number Diff line
@@ -15,16 +15,6 @@
#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */

#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) && defined(CONFIG_SMP)
#include <linux/jump_label.h>
DECLARE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);

#define percpu_first_chunk_is_paged	\
		(static_key_enabled(&__percpu_first_chunk_is_paged.key))
#else
#define percpu_first_chunk_is_paged	false
#endif /* CONFIG_PPC64 && CONFIG_SMP */

#include <asm-generic/percpu.h>

#include <asm/paca.h>
+3 −11
Original line number Diff line number Diff line
@@ -594,15 +594,8 @@ long notrace machine_check_early(struct pt_regs *regs)
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);
	/*
	 * Do not use nmi_enter/exit for pseries hpte guest.
	 *
	 * Likewise, do not use it in real mode if percpu first chunk is not
	 * embedded. With CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK enabled there
	 * are chances where percpu allocation can come from vmalloc area.
	 */
	if ((radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) &&
	    !percpu_first_chunk_is_paged)
	/* Do not use nmi_enter/exit for pseries hpte guest */
	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
		nmi_enter();

	hv_nmi_check_nonrecoverable(regs);
@@ -613,8 +606,7 @@ long notrace machine_check_early(struct pt_regs *regs)
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	if ((radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR)) &&
	    !percpu_first_chunk_is_paged)
	if (radix_enabled() || !firmware_has_feature(FW_FEATURE_LPAR))
		nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);
+0 −2
Original line number Diff line number Diff line
@@ -827,7 +827,6 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
DEFINE_STATIC_KEY_FALSE(__percpu_first_chunk_is_paged);

static void __init pcpu_populate_pte(unsigned long addr)
{
@@ -907,7 +906,6 @@ void __init setup_per_cpu_areas(void)
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	static_key_enable(&__percpu_first_chunk_is_paged.key);
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
                __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];