Commit 8b63cba7 authored by Ard Biesheuvel, committed by Borislav Petkov (AMD)
Browse files

x86/decompressor: Store boot_params pointer in callee save register



Instead of pushing and popping %RSI several times to preserve the struct
boot_params pointer across the execution of the startup code, move it
into a callee save register before the first call into C, and copy it
back when needed.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230807162720.545787-8-ardb@kernel.org
parent d7156b98
Loading
Loading
Loading
Loading
+16 −26
Original line number Diff line number Diff line
@@ -405,10 +405,14 @@ SYM_CODE_START(startup_64)
	lretq

.Lon_kernel_cs:
	/*
	 * RSI holds a pointer to a boot_params structure provided by the
	 * loader, and this needs to be preserved across C function calls. So
	 * move it into a callee saved register.
	 */
	movq	%rsi, %r15

	pushq	%rsi
	call	load_stage1_idt
	popq	%rsi

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
@@ -419,12 +423,10 @@ SYM_CODE_START(startup_64)
	 * CPUID instructions being issued, so go ahead and do that now via
	 * sev_enable(), which will also handle the rest of the SEV-related
	 * detection/setup to ensure that has been done in advance of any dependent
	 * code.
	 * code. Pass the boot_params pointer as the first argument.
	 */
	pushq	%rsi
	movq	%rsi, %rdi		/* real mode address */
	movq	%r15, %rdi
	call	sev_enable
	popq	%rsi
#endif

	/*
@@ -437,13 +439,10 @@ SYM_CODE_START(startup_64)
	 *   - Non zero RDX means trampoline needs to enable 5-level
	 *     paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 * Pass the boot_params pointer as the first argument.
	 */
	pushq	%rsi
	movq	%rsi, %rdi		/* real mode address */
	movq	%r15, %rdi
	call	paging_prepare
	popq	%rsi

	/* Save the trampoline address in RCX */
	movq	%rax, %rcx
@@ -456,9 +455,9 @@ SYM_CODE_START(startup_64)
	 * because the architecture does not guarantee that GPRs will retain
	 * their full 64-bit values across a 32-bit mode switch.
	 */
	pushq	%r15
	pushq	%rbp
	pushq	%rbx
	pushq	%rsi

	/*
	 * Push the 64-bit address of trampoline_return() onto the new stack.
@@ -475,9 +474,9 @@ SYM_CODE_START(startup_64)
	lretq
trampoline_return:
	/* Restore live 64-bit registers */
	popq	%rsi
	popq	%rbx
	popq	%rbp
	popq	%r15

	/* Restore the stack, the 32-bit trampoline uses its own stack */
	leaq	rva(boot_stack_end)(%rbx), %rsp
@@ -487,14 +486,9 @@ trampoline_return:
	 *
	 * RDI is address of the page table to use instead of page table
	 * in trampoline memory (if required).
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	leaq	rva(top_pgtable)(%rbx), %rdi
	call	cleanup_trampoline
	popq	%rsi

	/* Zero EFLAGS */
	pushq	$0
@@ -504,7 +498,6 @@ trampoline_return:
 * Copy the compressed kernel to the end of our buffer
 * where decompression in place becomes safe.
 */
	pushq	%rsi
	leaq	(_bss-8)(%rip), %rsi
	leaq	rva(_bss-8)(%rbx), %rdi
	movl	$(_bss - startup_32), %ecx
@@ -512,7 +505,6 @@ trampoline_return:
	std
	rep	movsq
	cld
	popq	%rsi

	/*
	 * The GDT may get overwritten either during the copy we just did or
@@ -544,30 +536,28 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
	shrq	$3, %rcx
	rep	stosq

	pushq	%rsi
	call	load_stage2_idt

	/* Pass boot_params to initialize_identity_maps() */
	movq	(%rsp), %rdi
	movq	%r15, %rdi
	call	initialize_identity_maps
	popq	%rsi

/*
 * Do the extraction, and jump to the new kernel..
 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	/* pass struct boot_params pointer */
	movq	%r15, %rdi
	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
	leaq	input_data(%rip), %rdx  /* input_data */
	movl	input_len(%rip), %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	movl	output_len(%rip), %r9d	/* decompressed length, end of relocs */
	call	extract_kernel		/* returns kernel entry point in %rax */
	popq	%rsi

/*
 * Jump to the decompressed kernel.
 */
	movq	%r15, %rsi
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)