Commit 883bbbff authored by Peter Zijlstra
Browse files

ftrace,kcfi: Separate ftrace_stub() and ftrace_stub_graph()



Different function signatures mean they need to be different
functions; otherwise CFI gets upset.

As triggered by the ftrace boot tests:

  [] CFI failure at ftrace_return_to_handler+0xac/0x16c (target: ftrace_stub+0x0/0x14; expected type: 0x0a5d5347)

Fixes: 3c516f89 ("x86: Add support for CONFIG_CFI_CLANG")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lkml.kernel.org/r/Y06dg4e1xF6JTdQq@hirez.programming.kicks-ass.net
parent b5f1fc31
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -7,6 +7,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
@@ -294,10 +295,14 @@ SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

SYM_FUNC_START(ftrace_stub)
SYM_TYPED_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

SYM_TYPED_FUNC_START(ftrace_stub_graph)
	ret
SYM_FUNC_END(ftrace_stub_graph)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void return_to_handler(void)
+9 −8
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/export.h>
@@ -129,6 +130,14 @@

	.endm

SYM_TYPED_FUNC_START(ftrace_stub)
	RET
SYM_FUNC_END(ftrace_stub)

SYM_TYPED_FUNC_START(ftrace_stub_graph)
	RET
SYM_FUNC_END(ftrace_stub_graph)

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)
@@ -176,11 +185,6 @@ SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)

SYM_FUNC_START(ftrace_stub)
	UNWIND_HINT_FUNC
	RET
SYM_FUNC_END(ftrace_stub)

SYM_FUNC_START(ftrace_regs_caller)
	/* Save the current flags before any operations that can change them */
	pushfq
@@ -282,9 +286,6 @@ STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)
SYM_FUNC_START(__fentry__)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
	ENDBR
	RET

trace:
+12 −6
Original line number Diff line number Diff line
@@ -162,6 +162,16 @@
#define PATCHABLE_DISCARDS	*(__patchable_function_entries)
#endif

#ifndef CONFIG_ARCH_SUPPORTS_CFI_CLANG
/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
#define	FTRACE_STUB_HACK	ftrace_stub_graph = ftrace_stub;
#else
#define FTRACE_STUB_HACK
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
 * The ftrace call sites are logged to a section whose name depends on the
@@ -169,10 +179,6 @@
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 *
 * Need to also make ftrace_stub_graph point to ftrace_stub
 * so that the same stub location may have different protocols
 * and not mess up with C verifiers.
 *
 * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
 * as some archs will have a different prototype for that function
 * but ftrace_ops_list_func() will have a single prototype.
@@ -182,11 +188,11 @@
			KEEP(*(__mcount_loc))			\
			KEEP_PATCHABLE				\
			__stop_mcount_loc = .;			\
			ftrace_stub_graph = ftrace_stub;	\
			FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
#else
# ifdef CONFIG_FUNCTION_TRACER
#  define MCOUNT_REC()	ftrace_stub_graph = ftrace_stub;	\
#  define MCOUNT_REC()	FTRACE_STUB_HACK			\
			ftrace_ops_list_func = arch_ftrace_ops_list_func;
# else
#  define MCOUNT_REC()