Commit 53645b5c authored by Ard Biesheuvel, committed by Zheng Zengkai

ARM: kernel: implement randomization of the kernel load address

maillist inclusion
commit c11744cd7b351b0fbc5233c04c32822544c96fc1
category: feature
feature: ARM kaslr support
bugzilla: 47952
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/commit/?h=arm-kaslr-latest&id=c11744cd7b351b0fbc5233c04c32822544c96fc1

Update Kconfig so that RANDOMIZE_BASE depends on !JUMP_LABEL, to resolve
the compilation conflict between -fpic and JUMP_LABEL.

-------------------------------------------------

This implements randomization of the placement of the kernel image
inside the lowmem region. It is intended to work together with the
decompressor to place the kernel at an offset in physical memory
that is a multiple of 2 MB, and to take the same offset into account
when creating the virtual mapping.
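
As an illustration only, the offset selection could look like the
following C sketch (kaslr_phys_offset and its parameters are
hypothetical names, not symbols introduced by this patch; the seed
would come from whatever entropy source the decompressor uses):

  #include <stdint.h>

  #define SZ_2M 0x200000UL

  /*
   * Pick a 2 MiB aligned physical offset such that the image still
   * fits in the lowmem window; the same offset is applied when the
   * virtual mapping is created.
   */
  static uint32_t kaslr_phys_offset(uint32_t seed, uint32_t lowmem_size,
                                    uint32_t image_size)
  {
          uint32_t slots = (lowmem_size - image_size) / SZ_2M;

          if (slots == 0)
                  return 0;                      /* no room to randomize */
          return (seed % slots) * SZ_2M;         /* always a 2 MiB multiple */
  }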

This uses runtime relocation of the kernel built as a PIE binary, to
fix up all absolute symbol references to refer to their runtime virtual
address. The physical-to-virtual mapping remains unchanged.
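
In C, the R_ARM_RELATIVE fixup performed by the relocation loop added
to __primary_switch below would look roughly as follows (a sketch for
illustration, not the kernel's implementation):

  #include <stdint.h>

  #define R_ARM_RELATIVE 23       /* matches the 'cmp r8, #23' in head.S */

  struct elf32_rel {
          uint32_t r_offset;      /* link-time VA of the word to patch */
          uint32_t r_info;        /* relocation type in the low 8 bits */
  };

  /*
   * Add the runtime offset to every absolute address the linker
   * emitted; both the location of the word and the value it holds
   * shift by the same amount, since phys-to-virt is unchanged.
   */
  static void apply_relative_relocs(const struct elf32_rel *rel,
                                    const struct elf32_rel *end,
                                    uint32_t offset)
  {
          for (; rel < end; rel++) {
                  if ((rel->r_info & 0xff) == R_ARM_RELATIVE) {
                          uint32_t *place = (uint32_t *)(rel->r_offset + offset);
                          *place += offset;
                  }
          }
  }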

In order to allow the decompressor to hand over to the core kernel
without making assumptions that are not guaranteed to hold when
invoking the core kernel directly using bootloaders that are not
KASLR aware, the KASLR offset is expected to be placed in r3 when
entering the kernel 4 bytes past the entry point, skipping the first
instruction.
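
A KASLR-aware loader could hand over as in the sketch below (C with
GCC inline assembly; kaslr_entry and its arguments are illustrative
and not part of this patch):

  typedef unsigned long ulong;

  static void __attribute__((noreturn))
  kaslr_entry(ulong entry_pa, ulong kaslr_offset, ulong machid, ulong dtb_pa)
  {
          register ulong r0 asm("r0") = 0;            /* must be zero */
          register ulong r1 asm("r1") = machid;       /* machine ID */
          register ulong r2 asm("r2") = dtb_pa;       /* atags/dtb pointer */
          register ulong r3 asm("r3") = kaslr_offset; /* stored via str_l */

          /* Jump 4 bytes past stext, skipping the 'mov r3, #0' that a
           * boot via the normal entry point would execute. */
          asm volatile("bx %4"
                       : : "r"(r0), "r"(r1), "r"(r2), "r"(r3),
                           "r"(entry_pa + 4));
          __builtin_unreachable();
  }

Entering at the normal entry point remains safe for bootloaders that
are not KASLR aware, since the first instruction clears r3.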

Cc: Russell King <linux@armlinux.org.uk>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Cui GaoSheng <cuigaosheng1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Parent: fc08a5d5
arch/arm/Kconfig: +12 −0
@@ -1683,6 +1683,18 @@ config RELOCATABLE
 	depends on !XIP_KERNEL && !JUMP_LABEL
 	select HAVE_ARCH_PREL32_RELOCATIONS
 
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	depends on MMU && AUTO_ZRELADDR
+	depends on !XIP_KERNEL && !ZBOOT_ROM && !JUMP_LABEL
+	select RELOCATABLE
+	select ARM_MODULE_PLTS if MODULES
+	select MODULE_REL_CRCS if MODVERSIONS
+	help
+	  Randomizes the virtual and physical address at which the kernel
+	  image is loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
 endmenu
 
 menu "Boot options"
arch/arm/kernel/head.S: +93 −9
@@ -45,6 +45,28 @@
 #define PMD_ORDER	2
 #endif
 
+	.macro	get_kaslr_offset, reg
+#ifdef CONFIG_RANDOMIZE_BASE
+	ldr_l			\reg, __kaslr_offset
+#else
+	mov			\reg, #0
+#endif
+	.endm
+
+	.macro	add_kaslr_offset, reg, tmp
+#ifdef CONFIG_RANDOMIZE_BASE
+	get_kaslr_offset	\tmp
+	add			\reg, \reg, \tmp
+#endif
+	.endm
+
+	.macro	sub_kaslr_offset, reg, tmp
+#ifdef CONFIG_RANDOMIZE_BASE
+	get_kaslr_offset	\tmp
+	sub			\reg, \reg, \tmp
+#endif
+	.endm
+
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -70,6 +92,7 @@
 	.equ	swapper_pg_dir, . - PG_DIR_SIZE
 
 ENTRY(stext)
+	mov	r3, #0			@ normal entry point - clear r3
  ARM_BE8(setend	be )			@ ensure we are in BE8 mode
 
  THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
@@ -77,6 +100,16 @@ ENTRY(stext)
  THUMB(	.thumb			)	@ switch to Thumb now.
  THUMB(1:			)
 
+#ifdef CONFIG_RANDOMIZE_BASE
+	str_l	r3, __kaslr_offset, r9	@ offset in r3 if entered via kaslr ep
+
+	.section ".bss", "aw", %nobits
+	.align	2
+__kaslr_offset:
+	.long	0			@ will be wiped before entering C code
+	.previous
+#endif
+
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install
 #endif
@@ -100,6 +133,7 @@ ENTRY(stext)
 #ifndef CONFIG_XIP_KERNEL
 	adr_l	r8, _text			@ __pa(_text)
 	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
+	sub_kaslr_offset r8, r12
 #else
 	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
 #endif
@@ -136,8 +170,8 @@ ENTRY(stext)
 	 * r0 will hold the CPU control register value, r1, r2, r4, and
 	 * r9 will be preserved.  r5 will also be preserved if LPAE.
 	 */
-	ldr	r13, =__mmap_switched		@ address to jump to after
-						@ mmu has been enabled
+	adr_l	lr, __primary_switch		@ address to jump to after
+	mov	r13, lr				@ mmu has been enabled
 	badr	lr, 1f				@ return (PIC) address
 #ifdef CONFIG_ARM_LPAE
 	mov	r5, #0				@ high TTBR0
@@ -148,7 +182,8 @@ ENTRY(stext)
 	ldr	r12, [r10, #PROCINFO_INITFUNC]
 	add	r12, r12, r10
 	ret	r12
-1:	b	__enable_mmu
+1:	get_kaslr_offset r12			@ get before turning MMU on
+	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
 
@@ -227,9 +262,14 @@ __create_page_tables:
 	/*
 	 * Map our RAM from the start to the end of the kernel .bss section.
 	 */
-	add	r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
-	ldr	r6, =(_end - 1)
-	orr	r3, r8, r7
+	get_kaslr_offset r3
+	add	r0, r3, #PAGE_OFFSET
+	add	r0, r4, r0, lsr #(SECTION_SHIFT - PMD_ORDER)
+	adr_l	r6, _end - 1
+	sub	r6, r6, r8
+	add	r6, r6, #PAGE_OFFSET
+	add	r3, r3, r8
+	orr	r3, r3, r7
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
 1:	str	r3, [r0], #1 << PMD_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
@@ -372,7 +412,7 @@ ENTRY(secondary_startup)
 	 * Use the page tables supplied from  __cpu_up.
 	 */
 	adr_l	r3, secondary_data
-	mov_l	r12, __secondary_switched
+	mov_l	r12, __secondary_switch
 	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
 ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
 ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
@@ -410,6 +450,7 @@ ENDPROC(__secondary_switched)
  *  r4  = TTBR pointer (low word)
  *  r5  = TTBR pointer (high word if LPAE)
  *  r9  = processor ID
+ *  r12 = KASLR offset
  *  r13 = *virtual* address to jump to upon completion
  */
 __enable_mmu:
@@ -451,6 +492,7 @@ ENDPROC(__enable_mmu)
  *  r1  = machine ID
  *  r2  = atags or dtb pointer
  *  r9  = processor ID
+ *  r12 = KASLR offset
  *  r13 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
@@ -466,10 +508,52 @@ ENTRY(__turn_mmu_on)
 	mov	r3, r3
 	mov	r3, r13
 	ret	r3
-__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
-	.popsection
 
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+	adr_l	r7, _text			@ r7 := __pa(_text)
+	sub	r7, r7, #TEXT_OFFSET		@ r7 := PHYS_OFFSET
+
+	adr_l	r5, __rel_begin
+	adr_l	r6, __rel_end
+	sub	r5, r5, r7
+	sub	r6, r6, r7
+
+	add	r5, r5, #PAGE_OFFSET
+	add	r6, r6, #PAGE_OFFSET
+	add	r5, r5, r12
+	add	r6, r6, r12
+
+	adr_l	r3, __stubs_start		@ __pa(__stubs_start)
+	sub	r3, r3, r7			@ offset of __stubs_start
+	add	r3, r3, #PAGE_OFFSET		@ __va(__stubs_start)
+	sub	r3, r3, #0xffff1000		@ subtract VA of stubs section
+
+0:	cmp	r5, r6
+	bge	1f
+	ldm	r5!, {r7, r8}			@ load next relocation entry
+	cmp	r8, #23				@ R_ARM_RELATIVE
+	bne	0b
+	cmp	r7, #0xff000000			@ vector page?
+	addgt	r7, r7, r3			@ fix up VA offset
+	ldr	r8, [r7, r12]
+	add	r8, r8, r12
+	str	r8, [r7, r12]
+	b	0b
+1:
+#endif
+	ldr	pc, =__mmap_switched
+ENDPROC(__primary_switch)
+
+#ifdef CONFIG_SMP
+__secondary_switch:
+	ldr	pc, =__secondary_switched
+ENDPROC(__secondary_switch)
+#endif
+	.ltorg
+__turn_mmu_on_end:
+	.popsection
 
 #ifdef CONFIG_SMP_ON_UP
 	__HEAD