arch/x86/entry/vdso/vclock_gettime.c (+1 −1)

@@ -178,7 +178,7 @@ notrace static cycle_t vread_tsc(void)
 	/*
 	 * GCC likes to generate cmov here, but this branch is extremely
-	 * predictable (it's just a funciton of time and the likely is
+	 * predictable (it's just a function of time and the likely is
 	 * very likely) and there's a data dependence, so force GCC
 	 * to generate a branch instead. I don't barrier() because
 	 * we don't actually need a barrier, and if this function
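For context, the pattern this comment describes looks roughly like the sketch below. This is a standalone illustration, not the kernel's code: likely(), cycle_t, and clamp_to_last() are stand-ins for the real definitions. The idea is that an early return from a well-predicted likely() branch keeps the comparison a real branch, with its data dependence intact, rather than inviting GCC to if-convert it into a cmov.

/* Sketch only: stand-in names, not the kernel's definitions. */
#define likely(x) __builtin_expect(!!(x), 1)

typedef unsigned long long cycle_t;

static cycle_t clamp_to_last(cycle_t cycles, cycle_t last)
{
	/*
	 * Almost always true (time moves forward), so the branch
	 * predictor handles this cheaply; the early-return shape is
	 * what discourages GCC from emitting a cmov here.
	 */
	if (likely(cycles >= last))
		return cycles;

	/* Rare path: the counter appeared to go backwards; clamp it. */
	return last;
}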
arch/x86/events/intel/lbr.c (+1 −1)

@@ -649,7 +649,7 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
 /*
  * return the type of control flow change at address "from"
- * intruction is not necessarily a branch (in case of interrupt).
+ * instruction is not necessarily a branch (in case of interrupt).
  *
  * The branch type returned also includes the priv level of the
  * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
arch/x86/events/perf_event.h (+1 −1)

@@ -272,7 +272,7 @@ struct cpu_hw_events {
  * events to select for counter rescheduling.
  *
  * Care must be taken as the rescheduling algorithm is O(n!) which
- * will increase scheduling cycles for an over-commited system
+ * will increase scheduling cycles for an over-committed system
  * dramatically. The number of such EVENT_CONSTRAINT_OVERLAP() macros
  * and its counter masks must be kept at a minimum.
  */
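To make the overlap concern concrete: the factorial blow-up comes from constraints whose counter masks intersect without either being a subset of the other, which forces the scheduler to backtrack over counter assignments. The snippet below is a standalone illustration with hypothetical mask values, not values taken from perf_event.h:

#include <stdio.h>

int main(void)
{
	/* Hypothetical constraint masks, one bit per hardware counter. */
	unsigned long mask_a = 0x03;	/* event A may use counters 0 or 1 */
	unsigned long mask_b = 0x06;	/* event B may use counters 1 or 2 */

	/*
	 * The masks share counter 1 but neither contains the other, so
	 * greedily placing A on counter 1 can strand B; the scheduler
	 * must then backtrack and retry A on counter 0. Each such
	 * overlapping constraint multiplies the retries, hence O(n!).
	 */
	printf("overlapping counters: 0x%02lx\n", mask_a & mask_b);
	return 0;
}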
arch/x86/include/asm/ftrace.h (+1 −1)

@@ -52,7 +52,7 @@ int ftrace_int3_handler(struct pt_regs *regs);
  * this screws up the trace output when tracing a ia32 task.
  * Instead of reporting bogus syscalls, just do not trace them.
  *
- * If the user realy wants these, then they should use the
+ * If the user really wants these, then they should use the
  * raw syscall tracepoints with filtering.
  */
 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
arch/x86/include/asm/hw_irq.h (+0 −14)

@@ -168,20 +168,6 @@ extern atomic_t irq_mis_count;
 extern void elcr_set_level_irq(unsigned int irq);
 
-/* SMP */
-extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
-extern __visible void smp_spurious_interrupt(struct pt_regs *);
-extern __visible void smp_x86_platform_ipi(struct pt_regs *);
-extern __visible void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_IO_APIC
-extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
-#endif
-#ifdef CONFIG_SMP
-extern __visible void smp_reschedule_interrupt(struct pt_regs *);
-extern __visible void smp_call_function_interrupt(struct pt_regs *);
-extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
-#endif
-
 extern char irq_entries_start[];
 #ifdef CONFIG_TRACING
 #define trace_irq_entries_start irq_entries_start