Commit f548b2ae authored by Thomas Gleixner, committed by Aichun Shi

init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()

stable inclusion
from stable-v5.10.189
commit 18fcd72da1ed6166f1cbb03f713bed50c839fc22
category: bugfix
bugzilla: https://gitee.com/openeuler/intel-kernel/issues/I8LVBS
CVE: N/A
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=18fcd72da1ed6166f1cbb03f713bed50c839fc22



-------------------------------------

Intel-SIG: commit 18fcd72da1ed ("init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init()")
Backport x86 related patches from 5.10.189 upstream

-------------------------------------

commit 439e1757 upstream

Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and
remove the weak fallback from the core code.

No functional change.
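
For background, the "weak fallback" being removed is the linker pattern where
generic code supplies an empty __weak default that an architecture's strong
definition overrides; after this change, the x86 header instead provides a
static inline stub when CONFIG_AMD_MEM_ENCRYPT is disabled. A minimal sketch
of the two patterns, using generic names rather than the kernel sources:

	/* Old pattern: weak fallback in generic code (core.c). */
	void __attribute__((weak)) arch_hook(void)
	{
		/* No-op unless an architecture links in a strong definition. */
	}

	/* An architecture's strong definition (arch/foo.c) wins at link time. */
	void arch_hook(void)
	{
		/* arch-specific initialization */
	}

	/* New pattern: config-gated stub in the arch header. */
	#ifdef CONFIG_FEATURE
	void arch_hook(void);			/* real definition in arch code */
	#else
	static inline void arch_hook(void) { }	/* compiles to nothing */
	#endif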

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de


Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Aichun Shi <aichun.shi@intel.com>
parent 0f82152a
+4 −3
@@ -47,14 +47,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 
 void __init mem_encrypt_free_decrypted_mem(void);
 
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void);
-
 void __init sev_es_init_vc_handling(void);
 bool sme_active(void);
 bool sev_active(void);
 bool sev_es_active(void);
 
+void __init mem_encrypt_init(void);
+
 #define __bss_decrypted __section(".bss..decrypted")
 
 #else	/* !CONFIG_AMD_MEM_ENCRYPT */
@@ -86,6 +85,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 static inline void mem_encrypt_free_decrypted_mem(void) { }
 
+static inline void mem_encrypt_init(void) { }
+
 #define __bss_decrypted
 
 #endif	/* CONFIG_AMD_MEM_ENCRYPT */
+11 −0
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
+#include <linux/mem_encrypt.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/io.h>
@@ -2287,4 +2288,14 @@ void __init arch_cpu_finalize_init(void)
 	} else {
 		fpu__init_check_bugs();
 	}
+
+	/*
+	 * This needs to be called before any devices perform DMA
+	 * operations that might use the SWIOTLB bounce buffers. It will
+	 * mark the bounce buffers as decrypted so that their usage will
+	 * not cause "plain-text" data to be decrypted when accessed. It
+	 * must be called after late_time_init() so that Hyper-V x86/x64
+	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
+	 */
+	mem_encrypt_init();
 }
+0 −11
@@ -96,7 +96,6 @@
 #include <linux/cache.h>
 #include <linux/rodata_test.h>
 #include <linux/jump_label.h>
-#include <linux/mem_encrypt.h>
 #include <linux/kcsan.h>
 #include <linux/init_syscalls.h>
 #include <linux/randomize_kstack.h>
@@ -777,8 +776,6 @@ void __init __weak thread_stack_cache_init(void)
 }
 #endif
 
-void __init __weak mem_encrypt_init(void) { }
-
 void __init __weak poking_init(void) { }
 
 void __init __weak pgtable_cache_init(void) { }
@@ -1022,14 +1019,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 	 */
 	locking_selftest();
 
-	/*
-	 * This needs to be called before any devices perform DMA
-	 * operations that might use the SWIOTLB bounce buffers. It will
-	 * mark the bounce buffers as decrypted so that their usage will
-	 * not cause "plain-text" data to be decrypted when accessed.
-	 */
-	mem_encrypt_init();
-
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start && !initrd_below_start_ok &&
 	    page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {