Commit dbd82fee authored by Pavel Tatashin, committed by Will Deacon

arm64: kexec: arm64_relocate_new_kernel clean-ups and optimizations



In preparation for bigger changes to arm64_relocate_new_kernel that will
enable this function to do an MMU-backed memory copy, do a few clean-ups
and optimizations. These include:

1. Call raw_dcache_line_size() only when relocation is actually going to
   happen, i.e. a kdump-type kexec does not need it.

2. copy_page(dest, src, tmps...) increments dest and src by PAGE_SIZE, so
   there is no need to store dest prior to calling copy_page and increment
   it after. Also, src is not used after the copy, so there is no need to
   save a copy of it either (see the C sketch after this list).

3. For consistency, put the comment on the same line as the instruction
   when it describes that instruction.

4. Some comment corrections
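
A minimal C sketch of the calling convention described in item 2, assuming
nothing beyond what the patch itself shows: the copy_page helper advances
both pointers by PAGE_SIZE as a side effect, so the caller neither saves
dest beforehand nor bumps it afterwards, and src need not be preserved.
The helper name copy_page_advancing and the use of memcpy are illustrative
only, not kernel code.

	#include <string.h>

	#define PAGE_SIZE 4096UL

	/* Illustrative stand-in for the asm copy_page macro: it bumps the
	 * caller's dest and src by PAGE_SIZE itself, so no explicit
	 * "dest += PAGE_SIZE" is needed at the call site. */
	static void copy_page_advancing(unsigned char **dest,
					const unsigned char **src)
	{
		memcpy(*dest, *src, PAGE_SIZE);
		*dest += PAGE_SIZE;
		*src  += PAGE_SIZE;
	}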

Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Link: https://lore.kernel.org/r/20210125191923.1060122-12-pasha.tatashin@soleen.com


Signed-off-by: Will Deacon <will@kernel.org>
parent 77a43be1
+8 −28
@@ -17,28 +17,24 @@
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
- * The memory that the old kernel occupies may be overwritten when coping the
+ * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location.  To assure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end.  The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
- * control_code_page, a special page which has been set up to be preserved
- * during the copy operation.
+ * safe memory that has been set up to be preserved during the copy operation.
 */
SYM_CODE_START(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x18, x2				/* x18 = dtb address */
	mov	x17, x1				/* x17 = kimage_start */
	mov	x16, x0				/* x16 = kimage_head */
-	raw_dcache_line_size x15, x0		/* x15 = dcache line size */
	mov	x14, xzr			/* x14 = entry ptr */
	mov	x13, xzr			/* x13 = copy dest */

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone

+	raw_dcache_line_size x15, x0		/* x15 = dcache line size */
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */

@@ -57,34 +53,18 @@ SYM_CODE_START(arm64_relocate_new_kernel)
	b.lo    2b
	dsb     sy

-	mov x20, x13
-	mov x21, x12
-	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7
-
-	/* dest += PAGE_SIZE */
-	add	x13, x13, PAGE_SIZE
+	copy_page x13, x12, x0, x1, x2, x3, x4, x5, x6, x7
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

-	/* ptr = addr */
-	mov	x14, x12
+	mov	x14, x12			/* ptr = addr */
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

-	/* dest = addr */
-	mov	x13, x12
-
+	mov	x13, x12			/* dest = addr */
.Lnext:
-	/* entry = *ptr++ */
-	ldr	x16, [x14], #8
-
-	/* while (!(entry & DONE)) */
-	tbz	x16, IND_DONE_BIT, .Lloop
-
+	ldr	x16, [x14], #8			/* entry = *ptr++ */
+	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
.Ldone:
	/* wait for writes from copy_page to finish */
	dsb	nsh
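
For reference, a rough C model of the list walk that
arm64_relocate_new_kernel performs, matching the structure visible in the
diff above. This is an illustrative sketch only: cache maintenance and the
final branch into the new image are omitted, page copying is reduced to
memcpy, and the IND_* flag values follow include/linux/kexec.h (0x1, 0x2,
0x4, 0x8) on the assumption that they are unchanged here.

	#include <stdint.h>
	#include <string.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define IND_DESTINATION	0x1UL	/* entry sets the copy destination */
	#define IND_INDIRECTION	0x2UL	/* entry points at the next entry page */
	#define IND_DONE	0x4UL	/* end of the list */
	#define IND_SOURCE	0x8UL	/* entry is a source page to copy */

	static void relocate_model(uint64_t head)
	{
		uint64_t entry = head;		/* x16 = kimage_head */
		uint64_t *ptr = 0;		/* x14 = entry ptr   */
		unsigned char *dest = 0;	/* x13 = copy dest   */

		while (!(entry & IND_DONE)) {			/* while (!(entry & DONE)) */
			uint64_t addr = entry & PAGE_MASK;	/* x12 = addr */

			if (entry & IND_SOURCE) {
				/* copy_page advances dest by PAGE_SIZE itself */
				memcpy(dest, (const void *)(uintptr_t)addr, PAGE_SIZE);
				dest += PAGE_SIZE;
			} else if (entry & IND_INDIRECTION) {
				ptr = (uint64_t *)(uintptr_t)addr;	/* ptr = addr */
			} else if (entry & IND_DESTINATION) {
				dest = (unsigned char *)(uintptr_t)addr; /* dest = addr */
			}
			entry = *ptr++;				/* entry = *ptr++ */
		}
	}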