Commit 2f2bbaa4 authored by Mark Rutland, committed by Will Deacon
Browse files

arm64: entry: organise entry handlers consistently



In entry.S we have two comments which distinguish EL0 and EL1 exception
handlers, but the code isn't actually laid out to match, and there are a
few other inconsistencies that would be good to clear up.

This patch organises the entry handlers consistently:

* The handlers are laid out in order of the vectors, to make them easier
  to navigate.

* The inconsistently-applied alignment is removed

* The handlers are consistently marked with SYM_CODE_START_LOCAL()
  rather than SYM_CODE_START_LOCAL_NOALIGN(), giving them the same
  default alignment as other assembly code snippets.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-9-mark.rutland@arm.com


Signed-off-by: Will Deacon <will@kernel.org>
parent 064dbfb4
Loading
Loading
Loading
Loading
+36 −42
Original line number Diff line number Diff line
@@ -607,65 +607,88 @@ SYM_CODE_END(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
SYM_CODE_START_LOCAL(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
SYM_CODE_START_LOCAL(el1_irq)
	kernel_entry 1
	mov	x0, sp
	bl	el1_irq_handler
	kernel_exit 1
SYM_CODE_END(el1_irq)

SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
SYM_CODE_START_LOCAL(el1_fiq)
	kernel_entry 1
	mov	x0, sp
	bl	el1_fiq_handler
	kernel_exit 1
SYM_CODE_END(el1_fiq)

SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	mov	x0, sp
	bl	el1_error_handler
	kernel_exit 1
SYM_CODE_END(el1_error)

/*
 * EL0 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
SYM_CODE_START_LOCAL(el0_sync)
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

SYM_CODE_START_LOCAL(el0_irq)
	kernel_entry 0
	mov	x0, sp
	bl	el0_irq_handler
	b	ret_to_user
SYM_CODE_END(el0_irq)

SYM_CODE_START_LOCAL(el0_fiq)
	kernel_entry 0
	mov	x0, sp
	bl	el0_fiq_handler
	b	ret_to_user
SYM_CODE_END(el0_fiq)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
	mov	x0, sp
	bl	el0_error_handler
	b	ret_to_user
SYM_CODE_END(el0_error)

#ifdef CONFIG_COMPAT
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
SYM_CODE_START_LOCAL(el0_sync_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_sync_compat)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
SYM_CODE_START_LOCAL(el0_irq_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_irq_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_irq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
SYM_CODE_START_LOCAL(el0_fiq_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_fiq_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_fiq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
SYM_CODE_START_LOCAL(el0_error_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_error_compat_handler
@@ -673,35 +696,6 @@ SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
SYM_CODE_END(el0_error_compat)
#endif

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
	mov	x0, sp
	bl	el0_irq_handler
	b	ret_to_user
SYM_CODE_END(el0_irq)

SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
	kernel_entry 0
	mov	x0, sp
	bl	el0_fiq_handler
	b	ret_to_user
SYM_CODE_END(el0_fiq)

SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	mov	x0, sp
	bl	el1_error_handler
	kernel_exit 1
SYM_CODE_END(el1_error)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
	mov	x0, sp
	bl	el0_error_handler
	b	ret_to_user
SYM_CODE_END(el0_error)

/*
 * "slow" syscall return path.
 */