Unverified Commit 6d225089 authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!3475 kaslr: ppc64: Introduce KASLR for PPC64

Merge Pull Request from: @ci-robot 
 
PR sync from: GUO Zihua <guozihua@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/XBDTRKAJR4UKNYN7ABWJOI5YLPUM6DSH/ 
This patchset introduces KASLR for PowerPC64 chips.

v3:
  Fixed one more commit message.
v2:
  Replaced bugzilla in commit messages.

GUO Zihua (2):
  powerpc/fsl_booke/kaslr: Provide correct r5 value for relocated kernel
  powerpc/fsl_booke/kaslr: Fix preserved memory size for int-vectors
    issue

Jason Yan (6):
  powerpc/fsl_booke/kaslr: refactor kaslr_legal_offset() and
    kaslr_early_init()
  powerpc/fsl_booke/64: introduce reloc_kernel_entry() helper
  powerpc/fsl_booke/64: implement KASLR for fsl_booke64
  powerpc/fsl_booke/64: do not clear the BSS for the second pass
  powerpc/fsl_booke/64: clear the original kernel if randomized
  powerpc/fsl_booke/kaslr: rename kaslr-booke32.rst to kaslr-booke.rst
    and add 64bit part

 rename Documentation/powerpc/{kaslr-booke32.rst => kaslr-booke.rst} (59%)

-- 
2.34.1
 
https://gitee.com/openeuler/kernel/issues/I8OHAZ 
 
Link: https://gitee.com/openeuler/kernel/pulls/3475

 

Reviewed-by: default avatarZhang Jianhua <chris.zjh@huawei.com>
Reviewed-by: default avatarKefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: default avatarZheng Zengkai <zhengzengkai@huawei.com>
parents c9e132d6 d1dda26f
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@ powerpc
    hvcs
    imc
    isa-versions
    kaslr-booke32
    kaslr-booke
    mpc52xx
    papr_hcalls
    pci_iov_resource_on_powernv
+31 −4
Original line number Diff line number Diff line
.. SPDX-License-Identifier: GPL-2.0

===========================
KASLR for Freescale BookE32
===========================
=========================
KASLR for Freescale BookE
=========================

The word KASLR stands for Kernel Address Space Layout Randomization.

This document tries to explain the implementation of the KASLR for
Freescale BookE32. KASLR is a security feature that deters exploit
Freescale BookE. KASLR is a security feature that deters exploit
attempts relying on knowledge of the location of kernel internals.

KASLR for Freescale BookE32
---------------------------

Since CONFIG_RELOCATABLE is already supported, what we need to do is
map or copy kernel to a proper place and relocate. Freescale Book-E
parts expect lowmem to be mapped by fixed TLB entries(TLB1). The TLB1
@@ -38,5 +41,29 @@ bit of the entropy to decide the index of the 64M zone. Then we chose a

                              kernstart_virt_addr


KASLR for Freescale BookE64
---------------------------

The implementation for Freescale BookE64 is similar to BookE32. One
difference is that Freescale BookE64 sets up a TLB mapping of 1G during
booting. Another difference is that ppc64 needs the kernel to be
64K-aligned. So we can randomize the kernel in this 1G mapping and make
it 64K-aligned. This can save some code to create another TLB map at early
boot. The disadvantage is that we only have about 1G/64K = 16384 slots to
put the kernel in::

    KERNELBASE

          64K                     |--> kernel <--|
           |                      |              |
        +--+--+--+    +--+--+--+--+--+--+--+--+--+    +--+--+
        |  |  |  |....|  |  |  |  |  |  |  |  |  |....|  |  |
        +--+--+--+    +--+--+--+--+--+--+--+--+--+    +--+--+
        |                         |                        1G
        |----->   offset    <-----|

                              kernstart_virt_addr

To enable KASLR, set CONFIG_RANDOMIZE_BASE = y. If KASLR is enabled and you
want to disable it at runtime, add "nokaslr" to the kernel cmdline.
+3 −2
Original line number Diff line number Diff line
@@ -660,14 +660,15 @@ config RELOCATABLE

config RANDOMIZE_BASE
	bool "Randomize the address of the kernel image"
	depends on PPC_85xx && FLATMEM
	depends on PPC_E500 && FLATMEM
	depends on RELOCATABLE
	default n
	help
	  Randomizes the virtual address at which the kernel image is
	  loaded, as a security feature that deters exploit attempts
	  relying on knowledge of the location of kernel internals.

	  If unsure, say Y.
	  If unsure, say N.

config RELOCATABLE_TEST
	bool "Test relocatable kernel"
+27 −0
Original line number Diff line number Diff line
@@ -1249,6 +1249,7 @@ skpinv: addi r6,r6,1 /* Increment */
1:	mflr	r6
	addi	r6,r6,(2f - 1b)
	tovirt(r6,r6)
	add	r6,r6,r19
	lis	r7,MSR_KERNEL@h
	ori	r7,r7,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r6
@@ -1271,6 +1272,7 @@ skpinv: addi r6,r6,1 /* Increment */

	/* We translate LR and return */
	tovirt(r8,r8)
	add	r8,r8,r19
	mtlr	r8
	blr

@@ -1403,6 +1405,7 @@ a2_tlbinit_code_end:
 */
_GLOBAL(start_initialization_book3e)
	mflr	r28
	li	r19, 0

	/* First, we need to setup some initial TLBs to map the kernel
	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
@@ -1445,6 +1448,12 @@ _GLOBAL(book3e_secondary_core_init)
	cmplwi	r4,0
	bne	2f

	li	r19, 0
#ifdef CONFIG_RANDOMIZE_BASE
	LOAD_REG_ADDR_PIC(r19, __kaslr_offset)
	ld	r19,0(r19)
	rlwinm  r19,r19,0,0,5
#endif
	/* Setup TLB for this core */
	bl	initial_tlb_book3e

@@ -1477,6 +1486,7 @@ _GLOBAL(book3e_secondary_core_init)
	lis	r3,PAGE_OFFSET@highest
	sldi	r3,r3,32
	or	r28,r28,r3
	add	r28,r28,r19
1:	mtlr	r28
	blr

@@ -1554,3 +1564,20 @@ _GLOBAL(setup_ehv_ivors)
_GLOBAL(setup_lrat_ivor)
	SET_IVOR(42, 0x340) /* LRAT Error */
	blr

/*
 * Return to the start of the relocated kernel and run again
 * r3 - virtual address of fdt
 * r4 - entry of the kernel
 */
_GLOBAL(reloc_kernel_entry)
	mfmsr	r7
	rlwinm	r7, r7, 0, ~(MSR_IS | MSR_DS)

	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r7
#ifdef CONFIG_RANDOMIZE_BASE
	LOAD_REG_ADDR_PIC(r19, __is_prom)
	lwz	r5,0(r19)
#endif
	rfi
+22 −0
Original line number Diff line number Diff line
@@ -116,6 +116,15 @@ __secondary_hold_acknowledge:
	.8byte	0x0

#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RANDOMIZE_BASE
	.globl	__kaslr_offset
__kaslr_offset:
	.8byte	0x0
	.globl	__is_prom
__is_prom:
	.8byte	0x0
#endif

	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
@@ -535,6 +544,12 @@ __start_initialization_multiplatform:
	/* Poison TOC */
	li	r2,-1

#ifdef CONFIG_RANDOMIZE_BASE
	/* Store value in r5 for relocation */
	LOAD_REG_ADDR_PIC(r19, __is_prom)
	stw r5,0(r19)
#endif

	/*
	 * Are we booted from a PROM Of-type client-interface ?
	 */
@@ -952,6 +967,13 @@ start_here_multiplatform:
	/* Adjust TOC for moved kernel. Could adjust when moving it instead. */
	bl	relative_toc

	/* Do not clear the BSS for the second pass if randomized */
	LOAD_REG_ADDR(r3, kernstart_virt_addr)
	ld	r3,0(r3)
	LOAD_REG_IMMEDIATE(r4, KERNELBASE)
	cmpd	r3,r4
	bne	4f

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
Loading