Commit 32b135a7 authored by Ard Biesheuvel, committed by Catalin Marinas
Browse files

arm64: head: avoid cache invalidation when entering with the MMU on



If we enter with the MMU on, there is no need for explicit cache
invalidation for stores to memory, as they will be coherent with the
caches.

Let's take advantage of this, and create the ID map with the MMU still
enabled if that is how we entered, and avoid any cache invalidation
calls in that case.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20230111102236.1430401-5-ardb@kernel.org


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 9d7c13e5
Loading
Loading
Loading
Loading
+3 −2
Original line number Diff line number Diff line
@@ -89,9 +89,9 @@
SYM_CODE_START(primary_entry)
	bl	record_mmu_state
	bl	preserve_boot_args
	bl	create_idmap
	bl	init_kernel_el			// w0=cpu_boot_mode
	mov	x20, x0
	bl	create_idmap

	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
@@ -377,12 +377,13 @@ SYM_FUNC_START_LOCAL(create_idmap)
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	cbnz	x19, 0f				// skip cache invalidation if MMU is on
	dmb	sy

	adrp	x0, init_idmap_pg_dir
	adrp	x1, init_idmap_pg_end
	bl	dcache_inval_poc
	ret	x28
0:	ret	x28
SYM_FUNC_END(create_idmap)

SYM_FUNC_START_LOCAL(create_kernel_mapping)