arch/x86/kernel/ftrace.c  +26 −0

@@ -30,14 +30,32 @@

 #ifdef CONFIG_DYNAMIC_FTRACE

+/*
+ * modifying_code is set to notify NMIs that they need to use
+ * memory barriers when entering or exiting. But we don't want
+ * to burden NMIs with unnecessary memory barriers when code
+ * modification is not being done (which is most of the time).
+ *
+ * A mutex is already held when ftrace_arch_code_modify_prepare
+ * and post_process are called. No locks need to be taken here.
+ *
+ * Stop machine will make sure currently running NMIs are done
+ * and new NMIs will see the updated variable before we need
+ * to worry about NMIs doing memory barriers.
+ */
+static int modifying_code __read_mostly;
+static DEFINE_PER_CPU(int, save_modifying_code);
+
 int ftrace_arch_code_modify_prepare(void)
 {
 	set_kernel_text_rw();
+	modifying_code = 1;
 	return 0;
 }

 int ftrace_arch_code_modify_post_process(void)
 {
+	modifying_code = 0;
 	set_kernel_text_ro();
 	return 0;
 }

@@ -149,6 +167,11 @@ static void ftrace_mod_code(void)

 void ftrace_nmi_enter(void)
 {
+	__get_cpu_var(save_modifying_code) = modifying_code;
+
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
 		smp_rmb();
 		ftrace_mod_code();

@@ -160,6 +183,9 @@ void ftrace_nmi_enter(void)

 void ftrace_nmi_exit(void)
 {
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	/* Finish all executions before clearing nmi_running */
 	smp_mb();
 	atomic_dec(&nmi_running);
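Note: the per-CPU save_modifying_code snapshot exists so that ftrace_nmi_exit() makes the same fast-path/slow-path decision as its matching ftrace_nmi_enter(), even if modifying_code flips while the NMI is running. Below is a minimal user-space sketch of that pairing; the handler names, the cpu parameter, and the printfs are illustrative stand-ins, not part of the patch:

#include <stdio.h>

static int modifying_code;		/* set around code modification */
static int save_modifying_code[4];	/* stand-in for the per-CPU slot */

static void nmi_enter(int cpu)
{
	/* Snapshot the flag; the exit path must honor this same value. */
	save_modifying_code[cpu] = modifying_code;
	if (!save_modifying_code[cpu])
		return;			/* fast path: skip barriers/accounting */
	printf("cpu%d: enter takes the barrier path\n", cpu);
}

static void nmi_exit(int cpu)
{
	/* Test the snapshot, not the live flag, which may have changed. */
	if (!save_modifying_code[cpu])
		return;
	printf("cpu%d: exit takes the barrier path\n", cpu);
}

int main(void)
{
	nmi_enter(0);			/* flag clear: both sides skip */
	nmi_exit(0);

	modifying_code = 1;		/* code modification begins */
	nmi_enter(1);			/* enter sees the flag set ...   */
	modifying_code = 0;		/* ... flag clears mid-"NMI" ... */
	nmi_exit(1);			/* ... exit still balances enter */
	return 0;
}

Without the snapshot, the exit above would return early and leave the nmi_running accounting taken at enter unbalanced.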
include/linux/syscalls.h  +4 −2

@@ -132,7 +132,8 @@ struct perf_event_attr;
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
	static const struct syscall_metadata __syscall_meta_##sname;	\
-	static struct ftrace_event_call event_enter_##sname;		\
+	static struct ftrace_event_call					\
+	__attribute__((__aligned__(4))) event_enter_##sname;		\
	static struct trace_event enter_syscall_print_##sname = {	\
		.trace			= print_syscall_enter,		\
	};								\

@@ -153,7 +154,8 @@ struct perf_event_attr;
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
	static const struct syscall_metadata __syscall_meta_##sname;	\
-	static struct ftrace_event_call event_exit_##sname;		\
+	static struct ftrace_event_call					\
+	__attribute__((__aligned__(4))) event_exit_##sname;		\
	static struct trace_event exit_syscall_print_##sname = {	\
		.trace			= print_syscall_exit,		\
	};								\
include/trace/ftrace.h  +2 −1

@@ -65,7 +65,8 @@
 };

 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)		\
-	static struct ftrace_event_call event_##name
+	static struct ftrace_event_call				\
+	__attribute__((__aligned__(4))) event_##name

 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
kernel/trace/Kconfig  +0 −9

@@ -328,15 +328,6 @@ config BRANCH_TRACER
	  Say N if unsure.

-config POWER_TRACER
-	bool "Trace power consumption behavior"
-	depends on X86
-	select GENERIC_TRACER
-	help
-	  This tracer helps developers to analyze and optimize the kernel's
-	  power management decisions, specifically the C-state and P-state
-	  behavior.
-
 config KSYM_TRACER
	bool "Trace read and write access on kernel memory locations"
	depends on HAVE_HW_BREAKPOINT
kernel/trace/trace.h  +2 −1

@@ -792,7 +792,8 @@ extern const char *__stop___trace_bprintk_fmt[];

 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
-	extern struct ftrace_event_call event_##call;
+	extern struct ftrace_event_call				\
+	__attribute__((__aligned__(4))) event_##call;

 #undef FTRACE_ENTRY_DUP
 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
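The three __attribute__((__aligned__(4))) hunks above are all the same fix: these event_* objects end up in the _ftrace_events section and are walked as a contiguous array between linker-provided start/stop symbols, so every object must carry the same explicit alignment or the pointer-increment stride breaks (gcc 4.5 started over-aligning otherwise-unconstrained objects, punching padding holes into the section). A small user-space sketch of that section-as-array pattern follows; the evcalls section name, struct ev, and DEFINE_EV are made up for the demo, and gcc/ld auto-generate __start_/__stop_ symbols for any section whose name is a valid C identifier:

#include <stdio.h>

struct ev {
	const char *name;
	int id;
};

/* Every definition gets the same explicit alignment, mirroring the
 * patch, so the linker packs the objects back-to-back in the section. */
#define DEFINE_EV(n, i)							\
	static struct ev ev_##n						\
	__attribute__((section("evcalls"), used, __aligned__(4))) =	\
		{ .name = #n, .id = (i) }

DEFINE_EV(open, 1);
DEFINE_EV(close, 2);
DEFINE_EV(read, 3);

extern struct ev __start_evcalls[];
extern struct ev __stop_evcalls[];

int main(void)
{
	struct ev *ev;

	/* The walk assumes a stride of exactly sizeof(struct ev); an
	 * over-aligned object would leave padding that ev++ steps into. */
	for (ev = __start_evcalls; ev < __stop_evcalls; ev++)
		printf("%s = %d\n", ev->name, ev->id);
	return 0;
}

Build with plain gcc on an ELF target and the loop prints all three entries; mix objects with differing alignment into the same section and the ev++ walk lands in padding, which is exactly the breakage the aligned(4) annotations prevent.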