Commit 4eb77fa1 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull x86 boot updates from Borislav Petkov:
 "A  of early boot cleanups and fixes.

   - Do some spring cleaning to the compressed boot code by moving the
     EFI mixed-mode code to a separate compilation unit, the AMD memory
     encryption early code where it belongs and fixing up build
     dependencies. Make the deprecated EFI handover protocol optional
     with the goal of removing it at some point (Ard Biesheuvel)

   - Skip realmode init code on Xen PV guests as it is not needed there

   - Remove an old 32-bit PIC code compiler workaround"

* tag 'x86_boot_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot: Remove x86_32 PIC using %ebx workaround
  x86/boot: Skip realmode init code when running as Xen PV guest
  x86/efi: Make the deprecated EFI handover protocol optional
  x86/boot/compressed: Only build mem_encrypt.S if AMD_MEM_ENCRYPT=y
  x86/boot/compressed: Adhere to calling convention in get_sev_encryption_bit()
  x86/boot/compressed: Move startup32_check_sev_cbit() out of head_64.S
  x86/boot/compressed: Move startup32_check_sev_cbit() into .text
  x86/boot/compressed: Move startup32_load_idt() out of head_64.S
  x86/boot/compressed: Move startup32_load_idt() into .text section
  x86/boot/compressed: Pull global variable reference into startup32_load_idt()
  x86/boot/compressed: Avoid touching ECX in startup32_set_idt_entry()
  x86/boot/compressed: Simplify IDT/GDT preserve/restore in the EFI thunk
  x86/boot/compressed, efi: Merge multiple definitions of image_offset into one
  x86/boot/compressed: Move efi32_pe_entry() out of head_64.S
  x86/boot/compressed: Move efi32_entry out of head_64.S
  x86/boot/compressed: Move efi32_pe_entry into .text section
  x86/boot/compressed: Move bootargs parsing out of 32-bit startup code
  x86/boot/compressed: Move 32-bit entrypoint code into .text section
  x86/boot/compressed: Rename efi_thunk_64.S to efi-mixed.S
parents 8b9ed79c 60253f10
Loading
Loading
Loading
Loading
+17 −0
Original line number Diff line number Diff line
@@ -1981,6 +1981,23 @@ config EFI_STUB

	  See Documentation/admin-guide/efi-stub.rst for more information.

config EFI_HANDOVER_PROTOCOL
	bool "EFI handover protocol (DEPRECATED)"
	depends on EFI_STUB
	default y
	help
	  Select this in order to include support for the deprecated EFI
	  handover protocol, which defines alternative entry points into the
	  EFI stub.  This is a practice that has no basis in the UEFI
	  specification, and requires a priori knowledge on the part of the
	  bootloader about Linux/x86 specific ways of passing the command line
	  and initrd, and where in memory those assets may be loaded.

	  If in doubt, say Y. Even though the corresponding support is not
	  present in upstream GRUB or other bootloaders, most distros build
	  GRUB with numerous downstream patches applied, and may rely on the
	  handover protocol as a result.

config EFI_MIXED
	bool "EFI mixed-mode support"
	depends on EFI_STUB && X86_64
+4 −4
Original line number Diff line number Diff line
@@ -100,7 +100,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
ifdef CONFIG_X86_64
	vmlinux-objs-y += $(obj)/ident_map_64.o
	vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
	vmlinux-objs-y += $(obj)/mem_encrypt.o
	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
	vmlinux-objs-y += $(obj)/pgtable_64.o
	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
@@ -108,11 +108,11 @@ endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
vmlinux-objs-$(CONFIG_INTEL_TDX_GUEST) += $(obj)/tdx.o $(obj)/tdcall.o

vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
vmlinux-objs-$(CONFIG_EFI) += $(obj)/efi.o
efi-obj-$(CONFIG_EFI_STUB) = $(objtree)/drivers/firmware/efi/libstub/lib.a
vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_mixed.o
vmlinux-objs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

$(obj)/vmlinux: $(vmlinux-objs-y) $(efi-obj-y) FORCE
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
	$(call if_changed,ld)

OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
+345 −0
Original line number Diff line number Diff line
@@ -22,6 +22,49 @@

	.code64
	.text
/*
 * When booting in 64-bit mode on 32-bit EFI firmware, startup_64_mixed_mode()
 * is the first thing that runs after switching to long mode. Depending on
 * whether the EFI handover protocol or the compat entry point was used to
 * enter the kernel, it will either branch to the 64-bit EFI handover
 * entrypoint at offset 0x390 in the image, or to the 64-bit EFI PE/COFF
 * entrypoint efi_pe_entry(). In the former case, the bootloader must provide a
 * struct bootparams pointer as the third argument, so the presence of such a
 * pointer is used to disambiguate.
 *
 *                                                             +--------------+
 *  +------------------+     +------------+            +------>| efi_pe_entry |
 *  | efi32_pe_entry   |---->|            |            |       +-----------+--+
 *  +------------------+     |            |     +------+----------------+  |
 *                           | startup_32 |---->| startup_64_mixed_mode |  |
 *  +------------------+     |            |     +------+----------------+  V
 *  | efi32_stub_entry |---->|            |            |     +------------------+
 *  +------------------+     +------------+            +---->| efi64_stub_entry |
 *                                                           +-------------+----+
 *                           +------------+     +----------+               |
 *                           | startup_64 |<----| efi_main |<--------------+
 *                           +------------+     +----------+
 */
SYM_FUNC_START(startup_64_mixed_mode)
	lea	efi32_boot_args(%rip), %rdx
	mov	0(%rdx), %edi
	mov	4(%rdx), %esi
	mov	8(%rdx), %edx		// saved bootparams pointer
	test	%edx, %edx
	jnz	efi64_stub_entry
	/*
	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
	 * shadow space on the stack even if all arguments are passed in
	 * registers. We also need an additional 8 bytes for the space that
	 * would be occupied by the return address, and this also results in
	 * the correct stack alignment for entry.
	 */
	sub	$40, %rsp
	mov	%rdi, %rcx		// MS calling convention
	mov	%rsi, %rdx
	jmp	efi_pe_entry
SYM_FUNC_END(startup_64_mixed_mode)

SYM_FUNC_START(__efi64_thunk)
	push	%rbp
	push	%rbx
@@ -53,24 +96,20 @@ SYM_FUNC_START(__efi64_thunk)

	leaq	0x20(%rsp), %rbx
	sgdt	(%rbx)

	addq	$16, %rbx
	sidt	(%rbx)
	sidt	16(%rbx)

	leaq	1f(%rip), %rbp

	/*
	 * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
	 * and IDT that was installed when the kernel started executing. The
	 * pointers were saved at the EFI stub entry point in head_64.S.
	 * Switch to IDT and GDT with 32-bit segments. These are the firmware
	 * GDT and IDT that were installed when the kernel started executing.
	 * The pointers were saved by the efi32_entry() routine below.
	 *
	 * Pass the saved DS selector to the 32-bit code, and use far return to
	 * restore the saved CS selector.
	 */
	leaq	efi32_boot_idt(%rip), %rax
	lidt	(%rax)
	leaq	efi32_boot_gdt(%rip), %rax
	lgdt	(%rax)
	lidt	efi32_boot_idt(%rip)
	lgdt	efi32_boot_gdt(%rip)

	movzwl	efi32_boot_ds(%rip), %edx
	movzwq	efi32_boot_cs(%rip), %rax
@@ -138,9 +177,7 @@ SYM_FUNC_START_LOCAL(efi_enter32)
	 */
	cli

	lidtl	(%ebx)
	subl	$16, %ebx

	lidtl	16(%ebx)
	lgdtl	(%ebx)

	movl	%cr4, %eax
@@ -168,22 +205,141 @@ SYM_FUNC_START_LOCAL(efi_enter32)
	lret
SYM_FUNC_END(efi_enter32)

/*
 * This is the common EFI stub entry point for mixed mode.
 *
 * Arguments:	%ecx	image handle
 * 		%edx	EFI system table pointer
 *		%esi	struct bootparams pointer (or NULL when not using
 *			the EFI handover protocol)
 *
 * Since this is the point of no return for ordinary execution, no registers
 * are considered live except for the function parameters. [Note that the EFI
 * stub may still exit and return to the firmware using the Exit() EFI boot
 * service.]
 */
SYM_FUNC_START(efi32_entry)
	call	1f
1:	pop	%ebx

	/* Save firmware GDTR and code/data selectors */
	sgdtl	(efi32_boot_gdt - 1b)(%ebx)
	movw	%cs, (efi32_boot_cs - 1b)(%ebx)
	movw	%ds, (efi32_boot_ds - 1b)(%ebx)

	/* Store firmware IDT descriptor */
	sidtl	(efi32_boot_idt - 1b)(%ebx)

	/* Store boot arguments */
	leal	(efi32_boot_args - 1b)(%ebx), %ebx
	movl	%ecx, 0(%ebx)
	movl	%edx, 4(%ebx)
	movl	%esi, 8(%ebx)
	movb	$0x0, 12(%ebx)          // efi_is64

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	jmp	startup_32
SYM_FUNC_END(efi32_entry)

#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)

/*
 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
 *			       efi_system_table_32_t *sys_table)
 */
SYM_FUNC_START(efi32_pe_entry)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax				// dummy push to allocate loaded_image

	pushl	%ebx				// save callee-save registers
	pushl	%edi

	call	verify_cpu			// check for long mode support
	testl	%eax, %eax
	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
	jnz	2f

	call	1f
1:	pop	%ebx

	/* Get the loaded image protocol pointer from the image handle */
	leal	-4(%ebp), %eax
	pushl	%eax				// &loaded_image
	leal	(loaded_image_proto - 1b)(%ebx), %eax
	pushl	%eax				// pass the GUID address
	pushl	8(%ebp)				// pass the image handle

	/*
	 * Note the alignment of the stack frame.
	 *   sys_table
	 *   handle             <-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image       <-- local variable
	 *   saved %ebx		<-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle             <-- 16-byte aligned for call to handle_protocol
	 */

	movl	12(%ebp), %eax			// sys_table
	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
	addl	$12, %esp			// restore argument space
	testl	%eax, %eax
	jnz	2f

	movl	8(%ebp), %ecx			// image_handle
	movl	12(%ebp), %edx			// sys_table
	movl	-4(%ebp), %esi			// loaded_image
	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
	leal	(startup_32 - 1b)(%ebx), %ebp	// runtime address of startup_32
	/*
	 * We need to set the image_offset variable here since startup_32() will
	 * use it before we get to the 64-bit efi_pe_entry() in C code.
	 */
	subl	%esi, %ebp			// calculate image_offset
	movl	%ebp, (image_offset - 1b)(%ebx)	// save image_offset
	xorl	%esi, %esi
	jmp	efi32_entry			// pass %ecx, %edx, %esi
						// no other registers remain live

2:	popl	%edi				// restore callee-save registers
	popl	%ebx
	leave
	RET
SYM_FUNC_END(efi32_pe_entry)

	.section ".rodata"
	/* EFI loaded image protocol GUID */
	.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
	.long	0x5b1b31a1
	.word	0x9562, 0x11d2
	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)

	.data
	.balign	8
SYM_DATA_START(efi32_boot_gdt)
SYM_DATA_START_LOCAL(efi32_boot_gdt)
	.word	0
	.quad	0
SYM_DATA_END(efi32_boot_gdt)

SYM_DATA_START(efi32_boot_idt)
SYM_DATA_START_LOCAL(efi32_boot_idt)
	.word	0
	.quad	0
SYM_DATA_END(efi32_boot_idt)

SYM_DATA_START(efi32_boot_cs)
	.word	0
SYM_DATA_END(efi32_boot_cs)

SYM_DATA_START(efi32_boot_ds)
	.word	0
SYM_DATA_END(efi32_boot_ds)
SYM_DATA_LOCAL(efi32_boot_cs, .word 0)
SYM_DATA_LOCAL(efi32_boot_ds, .word 0)
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
+0 −4
Original line number Diff line number Diff line
@@ -208,10 +208,6 @@ SYM_DATA_START_LOCAL(gdt)
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)

#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif

/*
 * Stack and heap for uncompression
 */
+18 −285
Original line number Diff line number Diff line
@@ -118,7 +118,9 @@ SYM_FUNC_START(startup_32)
1:

	/* Setup Exception handling for SEV-ES */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	call	startup32_load_idt
#endif

	/* Make sure cpu supports long mode. */
	call	verify_cpu
@@ -178,12 +180,13 @@ SYM_FUNC_START(startup_32)
  */
	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will insure that when the kernel is copied and decompressed
	 * This will ensure that when the kernel is copied and decompressed
	 * it will be done so encrypted.
	 */
	call	get_sev_encryption_bit
	xorl	%edx, %edx
#ifdef	CONFIG_AMD_MEM_ENCRYPT
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
@@ -249,6 +252,11 @@ SYM_FUNC_START(startup_32)
	movl    $__BOOT_TSS, %eax
	ltr	%ax

#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Check if the C-bit position is correct when SEV is active */
	call	startup32_check_sev_cbit
#endif

	/*
	 * Setup for the jump to 64bit mode
	 *
@@ -261,29 +269,11 @@ SYM_FUNC_START(startup_32)
	 */
	leal	rva(startup_64)(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	movl	rva(efi32_boot_args)(%ebp), %edi
	testl	%edi, %edi
	jz	1f
	leal	rva(efi64_stub_entry)(%ebp), %eax
	movl	rva(efi32_boot_args+4)(%ebp), %esi
	movl	rva(efi32_boot_args+8)(%ebp), %edx	// saved bootparams pointer
	testl	%edx, %edx
	jnz	1f
	/*
	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
	 * shadow space on the stack even if all arguments are passed in
	 * registers. We also need an additional 8 bytes for the space that
	 * would be occupied by the return address, and this also results in
	 * the correct stack alignment for entry.
	 */
	subl	$40, %esp
	leal	rva(efi_pe_entry)(%ebp), %eax
	movl	%edi, %ecx			// MS calling convention
	movl	%esi, %edx
	cmpb	$1, rva(efi_is64)(%ebp)
	je	1f
	leal	rva(startup_64_mixed_mode)(%ebp), %eax
1:
#endif
	/* Check if the C-bit position is correct when SEV is active */
	call	startup32_check_sev_cbit

	pushl	$__KERNEL_CS
	pushl	%eax
@@ -296,38 +286,14 @@ SYM_FUNC_START(startup_32)
	lret
SYM_FUNC_END(startup_32)

#ifdef CONFIG_EFI_MIXED
#if IS_ENABLED(CONFIG_EFI_MIXED) && IS_ENABLED(CONFIG_EFI_HANDOVER_PROTOCOL)
	.org 0x190
SYM_FUNC_START(efi32_stub_entry)
	add	$0x4, %esp		/* Discard return address */
	popl	%ecx
	popl	%edx
	popl	%esi

	call	1f
1:	pop	%ebp
	subl	$ rva(1b), %ebp

	movl	%esi, rva(efi32_boot_args+8)(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
	movl	%ecx, rva(efi32_boot_args)(%ebp)
	movl	%edx, rva(efi32_boot_args+4)(%ebp)
	movb	$0, rva(efi_is64)(%ebp)

	/* Save firmware GDTR and code/data selectors */
	sgdtl	rva(efi32_boot_gdt)(%ebp)
	movw	%cs, rva(efi32_boot_cs)(%ebp)
	movw	%ds, rva(efi32_boot_ds)(%ebp)

	/* Store firmware IDT descriptor */
	sidtl	rva(efi32_boot_idt)(%ebp)

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	jmp	startup_32
	jmp	efi32_entry
SYM_FUNC_END(efi32_stub_entry)
#endif

@@ -550,7 +516,9 @@ trampoline_return:
SYM_CODE_END(startup_64)

#ifdef CONFIG_EFI_STUB
#ifdef CONFIG_EFI_HANDOVER_PROTOCOL
	.org 0x390
#endif
SYM_FUNC_START(efi64_stub_entry)
	and	$~0xf, %rsp			/* realign the stack */
	movq	%rdx, %rbx			/* save boot_params pointer */
@@ -713,6 +681,7 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	jmp     1b
SYM_FUNC_END(.Lno_longmode)

	.globl	verify_cpu
#include "../../kernel/verify_cpu.S"

	.data
@@ -744,242 +713,6 @@ SYM_DATA_START(boot_idt)
	.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)

#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA_START(boot32_idt_desc)
	.word   boot32_idt_end - boot32_idt - 1
	.long   0
SYM_DATA_END(boot32_idt_desc)
	.balign 8
SYM_DATA_START(boot32_idt)
	.rept 32
	.quad 0
	.endr
SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
#endif

#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)

#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)

	__HEAD
	.code32
SYM_FUNC_START(efi32_pe_entry)
/*
 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
 *			       efi_system_table_32_t *sys_table)
 */

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax				// dummy push to allocate loaded_image

	pushl	%ebx				// save callee-save registers
	pushl	%edi

	call	verify_cpu			// check for long mode support
	testl	%eax, %eax
	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
	jnz	2f

	call	1f
1:	pop	%ebx
	subl	$ rva(1b), %ebx

	/* Get the loaded image protocol pointer from the image handle */
	leal	-4(%ebp), %eax
	pushl	%eax				// &loaded_image
	leal	rva(loaded_image_proto)(%ebx), %eax
	pushl	%eax				// pass the GUID address
	pushl	8(%ebp)				// pass the image handle

	/*
	 * Note the alignment of the stack frame.
	 *   sys_table
	 *   handle             <-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image       <-- local variable
	 *   saved %ebx		<-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle             <-- 16-byte aligned for call to handle_protocol
	 */

	movl	12(%ebp), %eax			// sys_table
	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
	addl	$12, %esp			// restore argument space
	testl	%eax, %eax
	jnz	2f

	movl	8(%ebp), %ecx			// image_handle
	movl	12(%ebp), %edx			// sys_table
	movl	-4(%ebp), %esi			// loaded_image
	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
	movl	%ebx, %ebp			// startup_32 for efi32_pe_stub_entry
	/*
	 * We need to set the image_offset variable here since startup_32() will
	 * use it before we get to the 64-bit efi_pe_entry() in C code.
	 */
	subl	%esi, %ebx
	movl	%ebx, rva(image_offset)(%ebp)	// save image_offset
	jmp	efi32_pe_stub_entry

2:	popl	%edi				// restore callee-save registers
	popl	%ebx
	leave
	RET
SYM_FUNC_END(efi32_pe_entry)

	.section ".rodata"
	/* EFI loaded image protocol GUID */
	.balign 4
SYM_DATA_START_LOCAL(loaded_image_proto)
	.long	0x5b1b31a1
	.word	0x9562, 0x11d2
	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
	__HEAD
	.code32
/*
 * Write an IDT entry into boot32_idt
 *
 * Parameters:
 *
 * %eax:	Handler address
 * %edx:	Vector number
 *
 * Physical offset is expected in %ebp
 */
SYM_FUNC_START(startup32_set_idt_entry)
	push    %ebx
	push    %ecx

	/* IDT entry address to %ebx */
	leal    rva(boot32_idt)(%ebp), %ebx
	shl	$3, %edx
	addl    %edx, %ebx

	/* Build IDT entry, lower 4 bytes */
	movl    %eax, %edx
	andl    $0x0000ffff, %edx	# Target code segment offset [15:0]
	movl    $__KERNEL32_CS, %ecx	# Target code segment selector
	shl     $16, %ecx
	orl     %ecx, %edx

	/* Store lower 4 bytes to IDT */
	movl    %edx, (%ebx)

	/* Build IDT entry, upper 4 bytes */
	movl    %eax, %edx
	andl    $0xffff0000, %edx	# Target code segment offset [31:16]
	orl     $0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate

	/* Store upper 4 bytes to IDT */
	movl    %edx, 4(%ebx)

	pop     %ecx
	pop     %ebx
	RET
SYM_FUNC_END(startup32_set_idt_entry)
#endif

SYM_FUNC_START(startup32_load_idt)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* #VC handler */
	leal    rva(startup32_vc_handler)(%ebp), %eax
	movl    $X86_TRAP_VC, %edx
	call    startup32_set_idt_entry

	/* Load IDT */
	leal	rva(boot32_idt)(%ebp), %eax
	movl	%eax, rva(boot32_idt_desc+2)(%ebp)
	lidt    rva(boot32_idt_desc)(%ebp)
#endif
	RET
SYM_FUNC_END(startup32_load_idt)

/*
 * Check for the correct C-bit position when the startup_32 boot-path is used.
 *
 * The check makes use of the fact that all memory is encrypted when paging is
 * disabled. The function creates 64 bits of random data using the RDRAND
 * instruction. RDRAND is mandatory for SEV guests, so always available. If the
 * hypervisor violates that the kernel will crash right here.
 *
 * The 64 bits of random data are stored to a memory location and at the same
 * time kept in the %eax and %ebx registers. Since encryption is always active
 * when paging is off the random data will be stored encrypted in main memory.
 *
 * Then paging is enabled. When the C-bit position is correct all memory is
 * still mapped encrypted and comparing the register values with memory will
 * succeed. An incorrect C-bit position will map all memory unencrypted, so that
 * the compare will use the encrypted random data and fail.
 */
SYM_FUNC_START(startup32_check_sev_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	pushl	%eax
	pushl	%ebx
	pushl	%ecx
	pushl	%edx

	/* Check for non-zero sev_status */
	movl	rva(sev_status)(%ebp), %eax
	testl	%eax, %eax
	jz	4f

	/*
	 * Get two 32-bit random values - Don't bail out if RDRAND fails
	 * because it is better to prevent forward progress if no random value
	 * can be gathered.
	 */
1:	rdrand	%eax
	jnc	1b
2:	rdrand	%ebx
	jnc	2b

	/* Store to memory and keep it in the registers */
	movl	%eax, rva(sev_check_data)(%ebp)
	movl	%ebx, rva(sev_check_data+4)(%ebp)

	/* Enable paging to see if encryption is active */
	movl	%cr0, %edx			 /* Backup %cr0 in %edx */
	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
	movl	%ecx, %cr0

	cmpl	%eax, rva(sev_check_data)(%ebp)
	jne	3f
	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
	jne	3f

	movl	%edx, %cr0	/* Restore previous %cr0 */

	jmp	4f

3:	/* Check failed - hlt the machine */
	hlt
	jmp	3b

4:
	popl	%edx
	popl	%ecx
	popl	%ebx
	popl	%eax
#endif
	RET
SYM_FUNC_END(startup32_check_sev_cbit)

/*
 * Stack and heap for uncompression
 */
Loading