block/blktrace.c +1 −1

@@ -245,7 +245,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		if (pid != 0 &&
 		    !(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC) &&
 		    (trace_flags & TRACE_ITER_STACKTRACE) != 0)
-			__trace_stack(blk_tr, NULL, flags, 5, pc);
+			__trace_stack(blk_tr, flags, 5, pc);
 		trace_wake_up();
 		return;
 	}
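Worth noting: this call site had been passing a literal NULL for the now-removed struct trace_array_cpu * argument, since blktrace has no per-CPU trace data of its own. That NULL is direct evidence the callee never dereferenced the parameter, which is what makes removing it across the whole call chain safe.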
kernel/trace/trace.c +18 −29

@@ -776,7 +776,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 }
 
 void
-trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
 	       int pc)
 {
@@ -802,7 +802,6 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static void __trace_graph_entry(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -826,7 +825,6 @@ static void __trace_graph_entry(struct trace_array *tr,
 }
 
 static void __trace_graph_return(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
@@ -856,11 +854,10 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data,
        int pc)
 {
 	if (likely(!atomic_read(&data->disabled)))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 }
 
 static void __ftrace_trace_stack(struct trace_array *tr,
-				 struct trace_array_cpu *data,
 				 unsigned long flags,
 				 int skip, int pc)
 {
@@ -891,26 +888,23 @@ static void __ftrace_trace_stack(struct trace_array *tr,
 }
 
 static void ftrace_trace_stack(struct trace_array *tr,
-			       struct trace_array_cpu *data,
 			       unsigned long flags,
 			       int skip, int pc)
 {
 	if (!(trace_flags & TRACE_ITER_STACKTRACE))
 		return;
 
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc)
 {
-	__ftrace_trace_stack(tr, data, flags, skip, pc);
+	__ftrace_trace_stack(tr, flags, skip, pc);
 }
 
 static void ftrace_trace_userstack(struct trace_array *tr,
-				   struct trace_array_cpu *data,
 				   unsigned long flags, int pc)
 {
 #ifdef CONFIG_STACKTRACE
@@ -942,20 +936,17 @@ static void ftrace_trace_userstack(struct trace_array *tr,
 #endif
 }
 
-void __trace_userstack(struct trace_array *tr,
-		       struct trace_array_cpu *data,
-		       unsigned long flags)
+void __trace_userstack(struct trace_array *tr, unsigned long flags)
 {
-	ftrace_trace_userstack(tr, data, flags, preempt_count());
+	ftrace_trace_userstack(tr, flags, preempt_count());
 }
 
 static void
-ftrace_trace_special(void *__tr, void *__data,
+ftrace_trace_special(void *__tr,
 		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
 		     int pc)
 {
 	struct ring_buffer_event *event;
-	struct trace_array_cpu *data = __data;
 	struct trace_array *tr = __tr;
 	struct special_entry *entry;
 	unsigned long irq_flags;
@@ -971,8 +962,8 @@ ftrace_trace_special(void *__tr, void *__data,
 	entry->arg2 = arg2;
 	entry->arg3 = arg3;
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, irq_flags, 4, pc);
-	ftrace_trace_userstack(tr, data, irq_flags, pc);
+	ftrace_trace_stack(tr, irq_flags, 4, pc);
+	ftrace_trace_userstack(tr, irq_flags, pc);
 
 	trace_wake_up();
 }
@@ -981,12 +972,11 @@ void
 __trace_special(void *__tr, void *__data,
 		unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
-	ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
+	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
 }
 
 void
 tracing_sched_switch_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *prev,
 			   struct task_struct *next,
 			   unsigned long flags, int pc)
@@ -1010,13 +1000,12 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_state = next->state;
 	entry->next_cpu = task_cpu(next);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 5, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 5, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 }
 
 void
 tracing_sched_wakeup_trace(struct trace_array *tr,
-			   struct trace_array_cpu *data,
 			   struct task_struct *wakee,
 			   struct task_struct *curr,
 			   unsigned long flags, int pc)
@@ -1040,8 +1029,8 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_state = wakee->state;
 	entry->next_cpu = task_cpu(wakee);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
-	ftrace_trace_stack(tr, data, flags, 6, pc);
-	ftrace_trace_userstack(tr, data, flags, pc);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
 
 	trace_wake_up();
 }
@@ -1064,7 +1053,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	data = tr->data[cpu];
 
 	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
+		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
 
 	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
@@ -1092,7 +1081,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_entry(tr, data, trace, flags, pc);
+		__trace_graph_entry(tr, trace, flags, pc);
 	}
 	/* Only do the atomic if it is not already set */
 	if (!test_tsk_trace_graph(current))
@@ -1118,7 +1107,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
 	disabled = atomic_inc_return(&data->disabled);
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		__trace_graph_return(tr, data, trace, flags, pc);
+		__trace_graph_return(tr, trace, flags, pc);
 	}
 	if (!trace->depth)
 		clear_tsk_trace_graph(current);
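The pattern across these hunks is uniform: struct trace_array_cpu *data had become a dead parameter in the trace-recording functions themselves — every hunk above shows records going through tr->buffer via ring_buffer_unlock_commit(), never through data — so the argument is dropped from the signatures and from every call site. Below is a minimal standalone sketch of that refactor, not kernel code; the names (sample_array, sample_array_cpu, sample_function_old/new) are hypothetical stand-ins for trace_array, trace_array_cpu and trace_function.

/* Dead-parameter elimination: the old signature threads a pointer that
 * the body never reads; the new one drops it at every call site. */
#include <stdio.h>

struct sample_array_cpu { int disabled; };
struct sample_array     { void *buffer; };

/* Before: data is accepted but never used -- every record goes
 * through tr->buffer, so data is dead weight in the signature. */
static void sample_function_old(struct sample_array *tr,
				struct sample_array_cpu *data,
				unsigned long ip)
{
	(void)data;	/* never dereferenced */
	printf("record ip=%#lx via buffer %p\n", ip, tr->buffer);
}

/* After: same behaviour, one fewer argument everywhere. */
static void sample_function_new(struct sample_array *tr, unsigned long ip)
{
	printf("record ip=%#lx via buffer %p\n", ip, tr->buffer);
}

int main(void)
{
	struct sample_array tr = { .buffer = (void *)0x1000 };
	struct sample_array_cpu data = { .disabled = 0 };

	sample_function_old(&tr, &data, 0xdeadbeef);	/* old call shape */
	sample_function_new(&tr, 0xdeadbeef);		/* new call shape */
	return 0;
}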
kernel/trace/trace.h +0 −4

@@ -419,14 +419,12 @@ void ftrace(struct trace_array *tr,
 	    unsigned long parent_ip,
 	    unsigned long flags, int pc);
 void tracing_sched_switch_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *prev,
 				struct task_struct *next,
 				unsigned long flags, int pc);
 void tracing_record_cmdline(struct task_struct *tsk);
 
 void tracing_sched_wakeup_trace(struct trace_array *tr,
-				struct trace_array_cpu *data,
 				struct task_struct *wakee,
 				struct task_struct *cur,
 				unsigned long flags, int pc);
@@ -436,7 +434,6 @@ void trace_special(struct trace_array *tr,
 		   unsigned long arg2,
 		   unsigned long arg3, int pc);
 void trace_function(struct trace_array *tr,
-		    struct trace_array_cpu *data,
 		    unsigned long ip,
 		    unsigned long parent_ip,
 		    unsigned long flags, int pc);
@@ -462,7 +459,6 @@ void update_max_tr_single(struct trace_array *tr,
 			  struct task_struct *tsk, int cpu);
 
 void __trace_stack(struct trace_array *tr,
-		   struct trace_array_cpu *data,
 		   unsigned long flags,
 		   int skip, int pc);
kernel/trace/trace_functions.c +4 −4

@@ -78,7 +78,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 
 	atomic_dec(&data->disabled);
 	ftrace_preempt_enable(resched);
@@ -108,7 +108,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 	}
 
 	atomic_dec(&data->disabled);
@@ -139,7 +139,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 
 	if (likely(disabled == 1)) {
 		pc = preempt_count();
-		trace_function(tr, data, ip, parent_ip, flags, pc);
+		trace_function(tr, ip, parent_ip, flags, pc);
 		/*
 		 * skip over 5 funcs:
 		 *    __ftrace_trace_stack,
@@ -148,7 +148,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
 		 *    ftrace_list_func
 		 *    ftrace_call
 		 */
-		__trace_stack(tr, data, flags, 5, pc);
+		__trace_stack(tr, flags, 5, pc);
 	}
 
 	atomic_dec(&data->disabled);
kernel/trace/trace_irqsoff.c +5 −5

@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1))
-		trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+		trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	atomic_dec(&data->disabled);
 }
@@ -153,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out_unlock;
 
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 
 	latency = nsecs_to_usecs(delta);
@@ -177,7 +177,7 @@ check_critical_timing(struct trace_array *tr,
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_reset(tr, cpu);
-	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
+	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
 }
 
 static inline void
@@ -210,7 +210,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
 
 	local_save_flags(flags);
 
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 
 	per_cpu(tracing_cpu, cpu) = 1;
@@ -244,7 +244,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 	atomic_inc(&data->disabled);
 
 	local_save_flags(flags);
-	trace_function(tr, data, ip, parent_ip, flags, preempt_count());
+	trace_function(tr, ip, parent_ip, flags, preempt_count());
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
 	atomic_dec(&data->disabled);
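Note that data itself does not disappear from the callers: sites such as ftrace_special(), trace_graph_entry() and irqsoff_tracer_call() still fetch data = tr->data[cpu] and use atomic_inc_return(&data->disabled) == 1 as a per-CPU re-entrancy guard before tracing. Only the record-writing functions lose the parameter. Below is a standalone C11 sketch of that guard pattern, not kernel code; a plain atomic_int stands in for the per-CPU data->disabled counter.

/* Re-entrancy guard: only the outermost entry records an event;
 * nested entries see a count greater than 1 and fall through. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int disabled;	/* stand-in for data->disabled */

static void trace_entry(unsigned long ip);

static void do_trace(unsigned long ip)
{
	printf("traced ip=%#lx\n", ip);
	/* A tracer that itself executes traced code would re-enter
	 * here; the guard in trace_entry() swallows the nested call. */
	if (ip != 0)
		trace_entry(0);
}

static void trace_entry(unsigned long ip)
{
	/* C11 equivalent of atomic_inc_return(&disabled) == 1 */
	if (atomic_fetch_add(&disabled, 1) + 1 == 1)
		do_trace(ip);
	atomic_fetch_sub(&disabled, 1);
}

int main(void)
{
	trace_entry(0x1234);	/* prints once; the nested call is suppressed */
	return 0;
}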