Commit 2142b7f0 authored by Linus Torvalds
Browse files
Pull kernel hardening updates from Kees Cook:

 - Add arm64 Shadow Call Stack support for GCC 12 (Dan Li)

 - Avoid memset with stack offset randomization under Clang (Marco
   Elver)

 - Clean up stackleak plugin to play nice with .noinstr (Kees Cook)

 - Check stack depth for greater usercopy hardening coverage (Kees Cook)

* tag 'hardening-v5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  arm64: Add gcc Shadow Call Stack support
  m68k: Implement "current_stack_pointer"
  xtensa: Implement "current_stack_pointer"
  usercopy: Check valid lifetime via stack depth
  stack: Constrain and fix stack offset randomization with Clang builds
  stack: Introduce CONFIG_RANDOMIZE_KSTACK_OFFSET
  gcc-plugins/stackleak: Ignore .noinstr.text and .entry.text
  gcc-plugins/stackleak: Exactly match strings instead of prefixes
  gcc-plugins/stackleak: Provide verbose mode
parents fd2d7a4a afcf5441
Loading
Loading
Loading
Loading
+29 −14
Original line number Diff line number Diff line
@@ -599,21 +599,22 @@ config STACKPROTECTOR_STRONG
config ARCH_SUPPORTS_SHADOW_CALL_STACK
	bool
	help
	  An architecture should select this if it supports Clang's Shadow
	  Call Stack and implements runtime support for shadow stack
	  An architecture should select this if it supports the compiler's
	  Shadow Call Stack and implements runtime support for shadow stack
	  switching.

config SHADOW_CALL_STACK
	bool "Clang Shadow Call Stack"
	depends on CC_IS_CLANG && ARCH_SUPPORTS_SHADOW_CALL_STACK
	bool "Shadow Call Stack"
	depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
	depends on DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
	help
	  This option enables Clang's Shadow Call Stack, which uses a
	  shadow stack to protect function return addresses from being
	  overwritten by an attacker. More information can be found in
	  Clang's documentation:
	  This option enables the compiler's Shadow Call Stack, which
	  uses a shadow stack to protect function return addresses from
	  being overwritten by an attacker. More information can be found
	  in the compiler's documentation:

	    https://clang.llvm.org/docs/ShadowCallStack.html
	  - Clang: https://clang.llvm.org/docs/ShadowCallStack.html
	  - GCC: https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html#Instrumentation-Options

	  Note that security guarantees in the kernel differ from the
	  ones documented for user space. The kernel must store addresses
@@ -1159,16 +1160,30 @@ config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
	  to the compiler, so it will attempt to add canary checks regardless
	  of the static branch state.

config RANDOMIZE_KSTACK_OFFSET_DEFAULT
	bool "Randomize kernel stack offset on syscall entry"
config RANDOMIZE_KSTACK_OFFSET
	bool "Support for randomizing kernel stack offset on syscall entry" if EXPERT
	default y
	depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
	depends on INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION >= 140000
	help
	  The kernel stack offset can be randomized (after pt_regs) by
	  roughly 5 bits of entropy, frustrating memory corruption
	  attacks that depend on stack address determinism or
	  cross-syscall address exposures. This feature is controlled
	  by kernel boot param "randomize_kstack_offset=on/off", and this
	  config chooses the default boot state.
	  cross-syscall address exposures.

	  The feature is controlled via the "randomize_kstack_offset=on/off"
	  kernel boot param, and if turned off has zero overhead due to its use
	  of static branches (see JUMP_LABEL).

	  If unsure, say Y.

config RANDOMIZE_KSTACK_OFFSET_DEFAULT
	bool "Default state of kernel stack offset randomization"
	depends on RANDOMIZE_KSTACK_OFFSET
	help
	  Kernel stack offset randomization is controlled by kernel boot param
	  "randomize_kstack_offset=on/off", and this config chooses the default
	  boot state.

config ARCH_OPTIONAL_KERNEL_RWX
	def_bool n
+1 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@ config ARM
	select ARCH_32BIT_OFF_T
	select ARCH_CORRECT_STACKTRACE_ON_KRETPROBE if HAVE_KRETPROBES && FRAME_POINTER && !ARM_UNWIND
	select ARCH_HAS_BINFMT_FLAT
	select ARCH_HAS_CURRENT_STACK_POINTER
	select ARCH_HAS_DEBUG_VIRTUAL if MMU
	select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
	select ARCH_HAS_ELF_RANDOMIZE
+2 −1
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ config ARM64
	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
	select ARCH_HAS_CACHE_LINE_SIZE
	select ARCH_HAS_CURRENT_STACK_POINTER
	select ARCH_HAS_DEBUG_VIRTUAL
	select ARCH_HAS_DEBUG_VM_PGTABLE
	select ARCH_HAS_DMA_PREP_COHERENT
@@ -1257,7 +1258,7 @@ config HW_PERF_EVENTS
	def_bool y
	depends on ARM_PMU

# Supported by clang >= 7.0
# Supported by clang >= 7.0 or GCC >= 12.0.0
config CC_HAVE_SHADOW_CALL_STACK
	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)

+1 −0
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@ config M68K
	default y
	select ARCH_32BIT_OFF_T
	select ARCH_HAS_BINFMT_FLAT
	select ARCH_HAS_CURRENT_STACK_POINTER
	select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
+3 −1
Original line number Diff line number Diff line
@@ -24,6 +24,8 @@ static inline struct task_struct *get_current(void)

#define	current	get_current()

#endif /* CONFNIG_MMU */
#endif /* CONFIG_MMU */

register unsigned long current_stack_pointer __asm__("sp");

#endif /* !(_M68K_CURRENT_H) */
Loading