Commit 697977d8 authored by Konrad Rzeszutek Wilk, committed by Borislav Petkov

x86/kexec: Disable RET on kexec

All the RET invocations unroll to a jump to __x86_return_thunk, and this
code must be position-independent, so plain returns have to be used
instead.

This fixes kexec on 64-bit AMD boxes.

  [ bp: Fix 32-bit build. ]

Reported-by: Edward Tran <edward.tran@oracle.com>
Reported-by: Awais Tanveer <awais.tanveer@oracle.com>
Suggested-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
parent 2259da15
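As the commit message notes, with return thunks enabled the RET macro from
<asm/nospec-branch.h> does not expand to a plain near return; it expands to a
jump to the shared __x86_return_thunk sitting in regular kernel text. A
simplified sketch of the two shapes the macro can take (the exact definitions
are config-dependent, e.g. on CONFIG_RETHUNK and CONFIG_SLS, so this is an
approximation rather than the verbatim header):

	/* return thunks enabled: every RET becomes a jump out of this code */
	#define RET	jmp __x86_return_thunk

	/* return thunks disabled (with SLS mitigation): a local, PIC-safe return */
	#define RET	ret; int3

The relocate_kernel code is copied to a control page and executed from that
copy at a different address, so the relative jump that was assembled for the
original location no longer lands on the thunk. Hence the change below: emit a
plain ret instead of RET.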
arch/x86/kernel/relocate_kernel_32.S: +19 −6
@@ -7,10 +7,12 @@
 #include <linux/linkage.h>
 #include <asm/page_types.h>
 #include <asm/kexec.h>
+#include <asm/nospec-branch.h>
 #include <asm/processor-flags.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 2)
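The replacement pattern, repeated at every return site in both files, is a
bare ret carrying an objtool annotation plus a trailing int3. An annotated
copy of the sequence (the comments are explanatory notes, not part of the
actual diff):

	ANNOTATE_UNRET_SAFE	/* tell objtool this naked ret is intentional */
	ret			/* plain near return; no reference outside the relocated code */
	int3			/* trap: stops straight-line speculation past the ret */

ANNOTATE_UNRET_SAFE keeps objtool's return validation from flagging the bare
ret, and the int3 plays the same speculation-stopper role the SLS variant of
RET would otherwise provide.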
@@ -91,7 +93,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	movl    %edi, %eax
 	addl    $(identity_mapped - relocate_kernel), %eax
 	pushl   %eax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -159,12 +163,15 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl    %edx, %edx
 	xorl    %esi, %esi
 	xorl    %ebp, %ebp
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 1:
 	popl	%edx
 	movl	CP_PA_SWAP_PAGE(%edi), %esp
 	addl	$PAGE_SIZE, %esp
 2:
+	ANNOTATE_RETPOLINE_SAFE
 	call	*%edx
 
 	/* get the re-entry point of the peer system */
@@ -190,7 +197,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	movl	%edi, %eax
 	addl	$(virtual_mapped - relocate_kernel), %eax
 	pushl	%eax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +217,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popl	%edi
 	popl	%esi
 	popl	%ebx
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -271,7 +282,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	popl	%edi
 	popl	%ebx
 	popl	%ebp
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size
arch/x86/kernel/relocate_kernel_64.S: +17 −6
@@ -13,7 +13,8 @@
 #include <asm/unwind_hints.h>
 
 /*
- * Must be relocatable PIC code callable as a C function
+ * Must be relocatable PIC code callable as a C function, in particular
+ * there must be a plain RET and not jump to return thunk.
  */
 
 #define PTR(x) (x << 3)
@@ -105,7 +106,9 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	/* jump to identity mapped page */
 	addq	$(identity_mapped - relocate_kernel), %r8
 	pushq	%r8
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(relocate_kernel)
 
 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -200,7 +203,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	xorl	%r14d, %r14d
 	xorl	%r15d, %r15d
 
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 
 1:
 	popq	%rdx
@@ -219,7 +224,9 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	call	swap_pages
 	movq	$virtual_mapped, %rax
 	pushq	%rax
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(identity_mapped)
 
 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -241,7 +248,9 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
 	popq	%r12
 	popq	%rbp
 	popq	%rbx
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(virtual_mapped)
 
 	/* Do the copies */
@@ -298,7 +307,9 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
 	lea	PAGE_SIZE(%rax), %rsi
 	jmp	0b
 3:
-	RET
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(swap_pages)
 
 	.globl kexec_control_code_size