Commit a13f2ef1 authored by Juergen Gross

x86/xen: remove 32-bit Xen PV guest support

Xen requires 64-bit machines today, and since Xen 4.14 it can be built
without 32-bit PV guest support. There is no need to carry the burden
of 32-bit PV guest support in the kernel any longer, as new guests can
be either HVM or PVH, or they can use a 64-bit kernel.

Remove the 32-bit Xen PV support from the kernel.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
parent d7b461ca
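The hunks below cover the 32-bit entry code, the 32-bit vDSO note, two headers, and the 32-bit boot code; the Kconfig side of the change is not part of this excerpt. Because CONFIG_XEN now requires a 64-bit build, guards of the form "#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)" in the headers below collapse to plain "#ifdef CONFIG_XEN_PV". As a rough sketch of the resulting dependency, assuming the usual arch/x86/xen/Kconfig layout (select lines omitted):

	config XEN
		bool "Xen guest support"
		depends on PARAVIRT
		# formerly: depends on X86_64 || (X86_32 && X86_PAE)
		depends on X86_64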
arch/x86/entry/entry_32.S (+2 −107)
@@ -449,8 +449,6 @@
 
 .macro SWITCH_TO_KERNEL_STACK
 
-	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
-
 	BUG_IF_WRONG_CR3
 
 	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
@@ -599,8 +597,6 @@
  */
 .macro SWITCH_TO_ENTRY_STACK
 
-	ALTERNATIVE     "", "jmp .Lend_\@", X86_FEATURE_XENPV
-
 	/* Bytes to copy */
 	movl	$PTREGS_SIZE, %ecx
 
@@ -872,17 +868,6 @@ SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
  * will ignore all of the single-step traps generated in this range.
  */
 
-#ifdef CONFIG_XEN_PV
-/*
- * Xen doesn't set %esp to be precisely what the normal SYSENTER
- * entry point expects, so fix it up before using the normal path.
- */
-SYM_CODE_START(xen_sysenter_target)
-	addl	$5*4, %esp			/* remove xen-provided frame */
-	jmp	.Lsysenter_past_esp
-SYM_CODE_END(xen_sysenter_target)
-#endif
-
 /*
  * 32-bit SYSENTER entry.
  *
@@ -965,9 +950,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
 	movl	%esp, %eax
 	call	do_SYSENTER_32
-	/* XEN PV guests always use IRET path */
-	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
-		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
+	testl	%eax, %eax
+	jz	.Lsyscall_32_done
 
 	STACKLEAK_ERASE
 
@@ -1165,95 +1149,6 @@ SYM_FUNC_END(entry_INT80_32)
 #endif
 .endm
 
-#ifdef CONFIG_PARAVIRT
-SYM_CODE_START(native_iret)
-	iret
-	_ASM_EXTABLE(native_iret, asm_iret_error)
-SYM_CODE_END(native_iret)
-#endif
-
-#ifdef CONFIG_XEN_PV
-/*
- * See comment in entry_64.S for further explanation
- *
- * Note: This is not an actual IDT entry point. It's a XEN specific entry
- * point and therefore named to match the 64-bit trampoline counterpart.
- */
-SYM_FUNC_START(xen_asm_exc_xen_hypervisor_callback)
-	/*
-	 * Check to see if we got the event in the critical
-	 * region in xen_iret_direct, after we've reenabled
-	 * events and checked for pending events.  This simulates
-	 * iret instruction's behaviour where it delivers a
-	 * pending interrupt when enabling interrupts:
-	 */
-	cmpl	$xen_iret_start_crit, (%esp)
-	jb	1f
-	cmpl	$xen_iret_end_crit, (%esp)
-	jae	1f
-	call	xen_iret_crit_fixup
-1:
-	pushl	$-1				/* orig_ax = -1 => not a system call */
-	SAVE_ALL
-	ENCODE_FRAME_POINTER
-
-	mov	%esp, %eax
-	call	xen_pv_evtchn_do_upcall
-	jmp	handle_exception_return
-SYM_FUNC_END(xen_asm_exc_xen_hypervisor_callback)
-
-/*
- * Hypervisor uses this for application faults while it executes.
- * We get here for two reasons:
- *  1. Fault while reloading DS, ES, FS or GS
- *  2. Fault while executing IRET
- * Category 1 we fix up by reattempting the load, and zeroing the segment
- * register if the load fails.
- * Category 2 we fix up by jumping to do_iret_error. We cannot use the
- * normal Linux return path in this case because if we use the IRET hypercall
- * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
- * We distinguish between categories by maintaining a status value in EAX.
- */
-SYM_FUNC_START(xen_failsafe_callback)
-	pushl	%eax
-	movl	$1, %eax
-1:	mov	4(%esp), %ds
-2:	mov	8(%esp), %es
-3:	mov	12(%esp), %fs
-4:	mov	16(%esp), %gs
-	/* EAX == 0 => Category 1 (Bad segment)
-	   EAX != 0 => Category 2 (Bad IRET) */
-	testl	%eax, %eax
-	popl	%eax
-	lea	16(%esp), %esp
-	jz	5f
-	jmp	asm_iret_error
-5:	pushl	$-1				/* orig_ax = -1 => not a system call */
-	SAVE_ALL
-	ENCODE_FRAME_POINTER
-	jmp	handle_exception_return
-
-.section .fixup, "ax"
-6:	xorl	%eax, %eax
-	movl	%eax, 4(%esp)
-	jmp	1b
-7:	xorl	%eax, %eax
-	movl	%eax, 8(%esp)
-	jmp	2b
-8:	xorl	%eax, %eax
-	movl	%eax, 12(%esp)
-	jmp	3b
-9:	xorl	%eax, %eax
-	movl	%eax, 16(%esp)
-	jmp	4b
-.previous
-	_ASM_EXTABLE(1b, 6b)
-	_ASM_EXTABLE(2b, 7b)
-	_ASM_EXTABLE(3b, 8b)
-	_ASM_EXTABLE(4b, 9b)
-SYM_FUNC_END(xen_failsafe_callback)
-#endif /* CONFIG_XEN_PV */
-
 SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
 	/* the function address is in %gs's slot on the stack */
 	SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
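The ALTERNATIVE lines removed above are boot-time patch sites: apply_alternatives() leaves the first instruction sequence in place on bare metal and rewrites it to the second when the named feature bit is set, and X86_FEATURE_XENPV is set only when running as a Xen PV guest. A 32-bit kernel can no longer run as a PV guest, so the plain bare-metal sequence suffices. A minimal sketch of the pattern, with an illustrative label name:

	/* Assembled as the first sequence; patched to the second at boot
	 * when the running system sets X86_FEATURE_XENPV. */
	ALTERNATIVE "testl %eax, %eax; jz .Lsketch_done", \
		    "jmp .Lsketch_done", X86_FEATURE_XENPV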
arch/x86/entry/vdso/vdso32/note.S (+0 −30)
@@ -16,33 +16,3 @@ ELFNOTE_START(Linux, 0, "a")
 ELFNOTE_END
 
 BUILD_SALT
-
-#ifdef CONFIG_XEN
-/*
- * Add a special note telling glibc's dynamic linker a fake hardware
- * flavor that it will use to choose the search path for libraries in the
- * same way it uses real hardware capabilities like "mmx".
- * We supply "nosegneg" as the fake capability, to indicate that we
- * do not like negative offsets in instructions using segment overrides,
- * since we implement those inefficiently.  This makes it possible to
- * install libraries optimized to avoid those access patterns in someplace
- * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
- * corresponding to the bits here is needed to make ldconfig work right.
- * It should contain:
- *	hwcap 1 nosegneg
- * to match the mapping of bit to name that we give here.
- *
- * At runtime, the fake hardware feature will be considered to be present
- * if its bit is set in the mask word.  So, we start with the mask 0, and
- * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
- */
-
-#include "../../xen/vdso.h"	/* Defines VDSO_NOTE_NONEGSEG_BIT.  */
-
-ELFNOTE_START(GNU, 2, "a")
-	.long 1			/* ncaps */
-VDSO32_NOTE_MASK:		/* Symbol used by arch/x86/xen/setup.c */
-	.long 0			/* mask */
-	.byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg"	/* bit, name */
-ELFNOTE_END
-#endif
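The /etc/ld.so.conf.d entry that the removed comment calls for is a one-liner. A sketch with a hypothetical file name (only the hwcap line itself is prescribed by the comment):

	# /etc/ld.so.conf.d/xen-nosegneg.conf -- file name is illustrative
	# Map hwcap bit 1 to "nosegneg" so ldconfig indexes libraries
	# installed under paths such as /lib/i686/tls/nosegneg.
	hwcap 1 nosegneg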
arch/x86/include/asm/proto.h (+1 −1)
@@ -25,7 +25,7 @@ void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
 void entry_INT80_compat(void);
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+#ifdef CONFIG_XEN_PV
 void xen_entry_INT80_compat(void);
 #endif
 #endif
arch/x86/include/asm/segment.h (+1 −1)
@@ -301,7 +301,7 @@ static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 extern void early_ignore_irq(void);
 
-#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+#ifdef CONFIG_XEN_PV
 extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
 #endif

arch/x86/kernel/head_32.S (+0 −31)
@@ -134,38 +134,7 @@ SYM_CODE_START(startup_32)
 	movl %eax,pa(initial_page_table+0xffc)
 #endif
 
-#ifdef CONFIG_PARAVIRT
-	/* This can only trip for a broken bootloader... */
-	cmpw $0x207, pa(boot_params + BP_version)
-	jb .Ldefault_entry
-
-	/* Paravirt-compatible boot parameters.  Look to see what architecture
-	   we're booting under. */
-	movl pa(boot_params + BP_hardware_subarch), %eax
-	cmpl $num_subarch_entries, %eax
-	jae .Lbad_subarch
-
-	movl pa(subarch_entries)(,%eax,4), %eax
-	subl $__PAGE_OFFSET, %eax
-	jmp *%eax
-
-.Lbad_subarch:
-SYM_INNER_LABEL_ALIGN(xen_entry, SYM_L_WEAK)
-	/* Unknown implementation; there's really
-	   nothing we can do at this point. */
-	ud2a
-
-	__INITDATA
-
-subarch_entries:
-	.long .Ldefault_entry		/* normal x86/PC */
-	.long xen_entry			/* Xen hypervisor */
-	.long .Ldefault_entry		/* Moorestown MID */
-num_subarch_entries = (. - subarch_entries) / 4
-.previous
-#else
 	jmp .Ldefault_entry
-#endif /* CONFIG_PARAVIRT */
 SYM_CODE_END(startup_32)
 
 #ifdef CONFIG_HOTPLUG_CPU