arch/x86/events/core.c  +7 −20

@@ -2490,7 +2490,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 	/* 32-bit process in 64-bit kernel. */
 	unsigned long ss_base, cs_base;
 	struct stack_frame_ia32 frame;
-	const void __user *fp;
+	const struct stack_frame_ia32 __user *fp;
 
 	if (!test_thread_flag(TIF_IA32))
 		return 0;
@@ -2501,18 +2501,12 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
 	fp = compat_ptr(ss_base + regs->bp);
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
-		unsigned long bytes;
-		frame.next_frame     = 0;
-		frame.return_address = 0;
-
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
-		if (bytes != 0)
+		if (__get_user(frame.next_frame, &fp->next_frame))
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
-		if (bytes != 0)
+		if (__get_user(frame.return_address, &fp->return_address))
 			break;
 
 		perf_callchain_store(entry, cs_base + frame.return_address);
@@ -2533,7 +2527,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct stack_frame frame;
-	const unsigned long __user *fp;
+	const struct stack_frame __user *fp;
 
 	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 		/* TODO: We don't support guest os callchain now */
@@ -2546,7 +2540,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 	if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
 		return;
 
-	fp = (unsigned long __user *)regs->bp;
+	fp = (void __user *)regs->bp;
 
 	perf_callchain_store(entry, regs->ip);
 
@@ -2558,19 +2552,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
 	pagefault_disable();
 	while (entry->nr < entry->max_stack) {
-		unsigned long bytes;
-
-		frame.next_frame	     = NULL;
-		frame.return_address = 0;
-
 		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
-		bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
-		if (bytes != 0)
+		if (__get_user(frame.next_frame, &fp->next_frame))
 			break;
-		bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
-		if (bytes != 0)
+		if (__get_user(frame.return_address, &fp->return_address))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
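Both callchain walkers now follow the same pattern: type the saved frame pointer as a pointer to the frame structure, fetch next_frame and return_address with individual __get_user() calls under pagefault_disable(), and stop at the first fault or invalid frame. Below is a minimal userspace sketch of that frame-pointer walk, not the kernel code itself: the struct mirrors the kernel's struct stack_frame layout, the depth cap and function names are invented for illustration, and a direct load stands in for __get_user() since no privilege boundary is crossed here.

/* Userspace sketch of the frame-pointer walk.  Build with
 * "gcc -O0 -fno-omit-frame-pointer walk.c" so %rbp links frames.
 * The depth cap of 4 keeps the walk inside frames this program
 * created itself, where the chain is known to be valid. */
#include <stdio.h>

struct stack_frame {			/* mirrors the kernel layout */
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static void __attribute__((noinline)) show_callchain(void)
{
	const struct stack_frame *fp = __builtin_frame_address(0);
	int depth;

	for (depth = 0; fp && depth < 4; depth++) {
		/* The kernel reads these two fields with __get_user()
		 * because the frame lives in user memory and may fault;
		 * in our own address space a direct load is fine. */
		printf("frame %d: return address %#lx\n",
		       depth, fp->return_address);
		if (fp->next_frame <= fp)	/* stack grows down */
			break;
		fp = fp->next_frame;
	}
}

static void __attribute__((noinline)) level2(void) { show_callchain(); }
static void __attribute__((noinline)) level1(void) { level2(); }

int main(void)
{
	level1();
	return 0;
}

Built with frame pointers enabled, each saved %rbp points at the caller's frame, which is exactly the chain the kernel follows through user memory.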
arch/x86/include/asm/uaccess.h  +0 −9

@@ -694,15 +694,6 @@ extern struct movsl_mask {
 # include <asm/uaccess_64.h>
 #endif
 
-/*
- * We rely on the nested NMI work to allow atomic faults from the NMI path; the
- * nested NMI paths are careful to preserve CR2.
- *
- * Caller must use pagefault_enable/disable, or run in interrupt context,
- * and also do a uaccess_ok() check
- */
-#define __copy_from_user_nmi __copy_from_user_inatomic
-
 /*
  * The "unsafe" user accesses aren't really "unsafe", but the naming
  * is a big fat warning: you have to not only do the access_ok()
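With both perf walkers converted, nothing references __copy_from_user_nmi any more, so the alias and its comment can be dropped; the constraints it documented become the caller's responsibility at each __get_user() site. A hedged sketch of that usage pattern in kernel context (read_user_word() is a hypothetical helper, not from this series; the caller is assumed to have done the access_ok() range check that __get_user() skips):

/* Sketch: a fault-safe single read from user memory in a context
 * that must not sleep.  'uaddr' must already be range-checked. */
#include <linux/uaccess.h>

static unsigned long read_user_word(const unsigned long __user *uaddr)
{
	unsigned long val = 0;

	pagefault_disable();		/* a fault here must not sleep   */
	if (__get_user(val, uaddr))	/* returns -EFAULT on bad access */
		val = 0;
	pagefault_enable();

	return val;
}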
arch/x86/kernel/stacktrace.c  +4 −2

@@ -96,7 +96,8 @@ struct stack_frame_user {
 };
 
 static int
-copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
+copy_stack_frame(const struct stack_frame_user __user *fp,
+		 struct stack_frame_user *frame)
 {
 	int ret;
 
@@ -105,7 +106,8 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
 	ret = 1;
 	pagefault_disable();
-	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+	if (__get_user(frame->next_fp, &fp->next_fp) ||
+	    __get_user(frame->ret_addr, &fp->ret_addr))
 		ret = 0;
 	pagefault_enable();
 
 	return ret;
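Putting the two hunks together, copy_stack_frame() ends up as below (a reconstruction for reading convenience; the unchanged range check between the declaration and 'ret = 1;' is elided). Two fault-safe single-field reads replace the one __copy_from_user_inatomic() bulk copy: one extra call, but the read fails cleanly on whichever field faults first, and the typed pointer lets the compiler check the field accesses.

/* Reconstructed post-patch function, unchanged range check elided. */
static int
copy_stack_frame(const struct stack_frame_user __user *fp,
		 struct stack_frame_user *frame)
{
	int ret;

	/* ... range check on fp elided ... */

	ret = 1;
	pagefault_disable();
	if (__get_user(frame->next_fp, &fp->next_fp) ||
	    __get_user(frame->ret_addr, &fp->ret_addr))
		ret = 0;
	pagefault_enable();

	return ret;
}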