Commit 2b0e86cc authored by Jason Yan, committed by Michael Ellerman
Browse files

powerpc/fsl_booke/32: implement KASLR infrastructure



This patch adds support for booting the kernel from places other than
KERNELBASE. Since CONFIG_RELOCATABLE is already supported, all we need
to do is map or copy the kernel to a proper place and relocate. Freescale
Book-E parts expect lowmem to be mapped by fixed TLB entries (TLB1). The
TLB1 entries are not suitable for mapping the kernel directly in a
randomized region, so we chose to copy the kernel to a proper place and
restart to relocate.

The offset of the kernel is not randomized yet (a fixed 64M is set). We
will randomize it in the next patch.

Signed-off-by: Jason Yan <yanaijie@huawei.com>
Tested-by: Diana Craciun <diana.craciun@nxp.com>
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
[mpe: Use PTRRELOC() in early_init()]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent c061b38a
Loading
Loading
Loading
Loading
+11 −0
Original line number Original line Diff line number Diff line
@@ -551,6 +551,17 @@ config RELOCATABLE
	  setting can still be useful to bootwrappers that need to know the
	  setting can still be useful to bootwrappers that need to know the
	  load address of the kernel (eg. u-boot/mkimage).
	  load address of the kernel (eg. u-boot/mkimage).


config RANDOMIZE_BASE
	bool "Randomize the address of the kernel image"
	depends on (FSL_BOOKE && FLATMEM && PPC32)
	depends on RELOCATABLE
	help
	  Randomizes the virtual address at which the kernel image is
	  loaded, as a security feature that deters exploit attempts
	  relying on knowledge of the location of kernel internals.

	  If unsure, say Y.

config RELOCATABLE_TEST
config RELOCATABLE_TEST
	bool "Test relocatable kernel"
	bool "Test relocatable kernel"
	depends on (PPC64 && RELOCATABLE)
	depends on (PPC64 && RELOCATABLE)
+0 −1
Original line number Original line Diff line number Diff line
@@ -75,7 +75,6 @@
#define MAS2_E			0x00000001
#define MAS2_E			0x00000001
#define MAS2_WIMGE_MASK		0x0000001f
#define MAS2_WIMGE_MASK		0x0000001f
#define MAS2_EPN_MASK(size)		(~0 << (size + 10))
#define MAS2_EPN_MASK(size)		(~0 << (size + 10))
#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))


#define MAS3_RPN		0xFFFFF000
#define MAS3_RPN		0xFFFFF000
#define MAS3_U0			0x00000200
#define MAS3_U0			0x00000200
+6 −3
Original line number Original line Diff line number Diff line
@@ -19,9 +19,12 @@
 */
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
{
	unsigned long offset = reloc_offset();
	unsigned long kva, offset = reloc_offset();

	kva = *PTRRELOC(&kernstart_virt_addr);


	/* First zero the BSS */
	/* First zero the BSS */
	if (kva == KERNELBASE)
		memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
		memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);


	/*
	/*
@@ -32,5 +35,5 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)


	apply_feature_fixups();
	apply_feature_fixups();


	return KERNELBASE + offset;
	return kva + offset;
}
}
+7 −8
Original line number Original line Diff line number Diff line
@@ -155,23 +155,22 @@ skpinv: addi r6,r6,1 /* Increment */


#if defined(ENTRY_MAPPING_BOOT_SETUP)
#if defined(ENTRY_MAPPING_BOOT_SETUP)


/* 6. Setup KERNELBASE mapping in TLB1[0] */
/* 6. Setup kernstart_virt_addr mapping in TLB1[0] */
	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
	mtspr	SPRN_MAS0,r6
	mtspr	SPRN_MAS0,r6
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
	mtspr	SPRN_MAS1,r6
	mtspr	SPRN_MAS1,r6
	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, MAS2_M_IF_NEEDED)@h
	lis	r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, MAS2_M_IF_NEEDED)@l
	ori	r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
	and	r6,r6,r20
	ori	r6,r6,MAS2_M_IF_NEEDED@l
	mtspr	SPRN_MAS2,r6
	mtspr	SPRN_MAS2,r6
	mtspr	SPRN_MAS3,r8
	mtspr	SPRN_MAS3,r8
	tlbwe
	tlbwe


/* 7. Jump to KERNELBASE mapping */
/* 7. Jump to kernstart_virt_addr mapping */
	lis	r6,(KERNELBASE & ~0xfff)@h
	mr	r6,r20
	ori	r6,r6,(KERNELBASE & ~0xfff)@l
	rlwinm	r7,r25,0,0x03ffffff
	add	r6,r7,r6


#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
/*
+10 −3
Original line number Original line Diff line number Diff line
@@ -155,6 +155,8 @@ _ENTRY(_start);
 */
 */


_ENTRY(__early_start)
_ENTRY(__early_start)
	LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
	lwz     r20,0(r20)


#define ENTRY_MAPPING_BOOT_SETUP
#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
#include "fsl_booke_entry_mapping.S"
@@ -277,8 +279,8 @@ set_ivor:
	ori	r6, r6, swapper_pg_dir@l
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	lis     r3, kernstart_virt_addr@ha
	ori	r4, r4, KERNELBASE@l
	lwz     r4, kernstart_virt_addr@l(r3)
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)
	stw	r6, 0(r5)


@@ -1067,7 +1069,12 @@ __secondary_start:
	mr	r5,r25		/* phys kernel start */
	mr	r5,r25		/* phys kernel start */
	rlwinm	r5,r5,0,~0x3ffffff	/* aligned 64M */
	rlwinm	r5,r5,0,~0x3ffffff	/* aligned 64M */
	subf	r4,r5,r4	/* memstart_addr - phys kernel start */
	subf	r4,r5,r4	/* memstart_addr - phys kernel start */
	li	r5,0		/* no device tree */
	lis	r7,KERNELBASE@h
	ori	r7,r7,KERNELBASE@l
	cmpw	r20,r7		/* if kernstart_virt_addr != KERNELBASE, randomized */
	beq	2f
	li	r4,0
2:	li	r5,0		/* no device tree */
	li	r6,0		/* not boot cpu */
	li	r6,0		/* not boot cpu */
	bl	restore_to_as0
	bl	restore_to_as0


Loading