Commit 61786170 authored by Ard Biesheuvel, committed by Catalin Marinas
Browse files

efi: arm64: enter with MMU and caches enabled



Instead of cleaning the entire loaded kernel image to the PoC and
disabling the MMU and caches before branching to the kernel's bare metal
entry point, we can leave the MMU and caches enabled, and rely on EFI's
cacheable 1:1 mapping of all of system RAM (which is mandated by the
spec) to populate the initial page tables.

This removes the need for managing coherency in software, which is
tedious and error prone.

Note that we still need to clean the executable region of the image to
the PoU if this is required for I/D coherency, but only if we actually
decided to move the image in memory, as otherwise, this will have been
taken care of by the loader.

This change affects both the builtin EFI stub as well as the zboot
decompressor, which now carries the entire EFI stub along with the
decompression code and the compressed image.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-7-ardb@kernel.org


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 3dcf60bb
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -105,6 +105,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
#define EFI_ALLOC_ALIGN		SZ_64K
#define EFI_ALLOC_LIMIT		((1UL << 48) - 1)

extern unsigned long primary_entry_offset(void);

/*
 * On ARM systems, virtually remapped UEFI runtime services are set up in two
 * distinct stages:
+3 −2
Original line number Diff line number Diff line
@@ -10,7 +10,7 @@
#error This file should only be included in vmlinux.lds.S
#endif

PROVIDE(__efistub_primary_entry_offset	= primary_entry - _text);
PROVIDE(__efistub_primary_entry		= primary_entry);

/*
 * The EFI stub has its own symbol namespace prefixed by __efistub_, to
@@ -21,10 +21,11 @@ PROVIDE(__efistub_primary_entry_offset = primary_entry - _text);
 * linked at. The routines below are all implemented in assembler in a
 * position independent manner
 */
PROVIDE(__efistub_dcache_clean_poc	= __pi_dcache_clean_poc);
PROVIDE(__efistub_caches_clean_inval_pou = __pi_caches_clean_inval_pou);

PROVIDE(__efistub__text			= _text);
PROVIDE(__efistub__end			= _end);
PROVIDE(__efistub___inittext_end       	= __inittext_end);
PROVIDE(__efistub__edata		= _edata);
PROVIDE(__efistub_screen_info		= screen_info);
PROVIDE(__efistub__ctype		= _ctype);
+1 −0
Original line number Diff line number Diff line
@@ -56,6 +56,7 @@ SYM_FUNC_START(caches_clean_inval_pou)
	caches_clean_inval_pou_macro
	ret
SYM_FUNC_END(caches_clean_inval_pou)
SYM_FUNC_ALIAS(__pi_caches_clean_inval_pou, caches_clean_inval_pou)

/*
 *	caches_clean_inval_user_pou(start,end)
+2 −2
Original line number Diff line number Diff line
@@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
				   screen_info.o efi-stub-entry.o

lib-$(CONFIG_ARM)		+= arm32-stub.o
lib-$(CONFIG_ARM64)		+= arm64.o arm64-stub.o arm64-entry.o smbios.o
lib-$(CONFIG_ARM64)		+= arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86)		+= x86-stub.o
lib-$(CONFIG_RISCV)		+= riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH)		+= loongarch.o loongarch-stub.o
@@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
#
STUBCOPY_FLAGS-$(CONFIG_ARM64)	+= --prefix-alloc-sections=.init \
				   --prefix-symbols=__efistub_
STUBCOPY_RELOC-$(CONFIG_ARM64)	:= R_AARCH64_ABS64
STUBCOPY_RELOC-$(CONFIG_ARM64)	:= R_AARCH64_ABS

# For RISC-V, we don't need anything special other than arm64. Keep all the
# symbols in .init section and make sure that no absolute symbols references
+0 −67
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * EFI entry point.
 *
 * Copyright (C) 2013, 2014 Red Hat, Inc.
 * Author: Mark Salter <msalter@redhat.com>
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	/*
	 * The entrypoint of an arm64 bare metal image is at offset #0 of the
	 * image, so this is a reasonable default for primary_entry_offset.
	 * Only when the EFI stub is integrated into the core kernel, it is not
	 * guaranteed that the PE/COFF header has been copied to memory too, so
	 * in this case, primary_entry_offset should be overridden by the
	 * linker and point to primary_entry() directly.
	 */
	.weak	primary_entry_offset

/*
 * efi_enter_kernel(entry, fdt) - hand off from the EFI stub to the bare
 * metal kernel entry point with the MMU and caches disabled.
 *
 * In:   x0 = base of the (possibly relocated) kernel image
 *       x1 = physical address of the device tree blob
 * Out:  does not return; branches to the kernel entrypoint with
 *       x0 = DTB address and x1-x3 = 0, as the arm64 boot protocol requires.
 * Runs at either EL1 or EL2; disables M and C bits in the SCTLR of the
 * current exception level before jumping.
 */
SYM_CODE_START(efi_enter_kernel)
	/*
	 * efi_pe_entry() will have copied the kernel image if necessary and we
	 * end up here with device tree address in x1 and the kernel entry
	 * point stored in x0. Save those values in registers which are
	 * callee preserved.
	 */
	ldr	w2, =primary_entry_offset
	add	x19, x0, x2		// relocated Image entrypoint

	// Set up the boot-protocol register state: x0 = DTB, x1-x3 = 0.
	mov	x0, x1			// DTB address
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr

	/*
	 * Clean the remainder of this routine to the PoC
	 * so that we can safely disable the MMU and caches.
	 */
	// Clean+invalidate the line holding the code at 1f so the final
	// MMU-off sequence is visible to the non-cacheable fetch path.
	// NOTE(review): this covers a single cache line at 1f; the
	// .balign/.org pair below keeps both EL sequences within 32 bytes
	// of 1b — presumably <= the minimum D-cache line size; confirm.
	adr	x4, 1f
	dc	civac, x4
	dsb	sy

	/* Turn off Dcache and MMU */
	// The NZCV flags set by this cmp stay live all the way to the
	// b.eq below: mrs and bic do not modify flags.
	mrs	x4, CurrentEL
	cmp	x4, #CurrentEL_EL2
	mrs	x4, sctlr_el1		// speculative read; replaced if at EL2
	b.ne	0f
	mrs	x4, sctlr_el2
0:	bic	x4, x4, #SCTLR_ELx_M	// clear MMU enable
	bic	x4, x4, #SCTLR_ELx_C	// clear D-cache enable
	b.eq	1f			// still testing CurrentEL == EL2
	b	2f

	.balign	32
	// EL2 path: write back SCTLR_EL2 with M and C cleared, then jump.
1:	pre_disable_mmu_workaround
	msr	sctlr_el2, x4
	isb
	br	x19		// jump to kernel entrypoint

	// EL1 path: same, but for SCTLR_EL1.
2:	pre_disable_mmu_workaround
	msr	sctlr_el1, x4
	isb
	br	x19		// jump to kernel entrypoint

	// Assemble-time assertion that both sequences above fit in the
	// 32-byte region starting at 1b (the region cleaned via dc civac).
	.org	1b + 32
SYM_CODE_END(efi_enter_kernel)
Loading