arch/x86/entry/vdso/vma.c (+2, −2) @@ -413,10 +413,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 #ifdef CONFIG_COMPAT
 int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
-                                       int uses_interp)
+                                       int uses_interp, bool x32)
 {
 #ifdef CONFIG_X86_X32_ABI
-	if (test_thread_flag(TIF_X32)) {
+	if (x32) {
 		if (!vdso64_enabled)
 			return 0;

 		return map_vdso_randomized(&vdso_image_x32);

arch/x86/entry/vsyscall/vsyscall_64.c (+1, −1) @@ -316,7 +316,7 @@ static struct vm_area_struct gate_vma __ro_after_init = {
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_COMPAT
-	if (!mm || mm->context.ia32_compat)
+	if (!mm || !(mm->context.flags & MM_CONTEXT_HAS_VSYSCALL))
 		return NULL;
 #endif
 	if (vsyscall_mode == NONE)

arch/x86/events/core.c (+1, −1) @@ -2602,7 +2602,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 	struct stack_frame_ia32 frame;
 	const struct stack_frame_ia32 __user *fp;

-	if (!test_thread_flag(TIF_IA32))
+	if (user_64bit_mode(regs))
 		return 0;

 	cs_base = get_segment_base(regs->cs);

arch/x86/events/intel/ds.c (+1, −1) @@ -1259,7 +1259,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 		old_to = to;

 #ifdef CONFIG_X86_64
-		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
 #endif
 		insn_init(&insn, kaddr, size, is_64bit);
 		insn_get_length(&insn);

arch/x86/events/intel/lbr.c (+1, −1) @@ -1221,7 +1221,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 	 * on 64-bit systems running 32-bit apps
 	 */
 #ifdef CONFIG_X86_64
-	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
+	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
 #endif
 	insn_init(&insn, addr, bytes_read, is64);
 	insn_get_opcode(&insn);
arch/x86/entry/vdso/vma.c (+2, −2) @@ -413,10 +413,10 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 #ifdef CONFIG_COMPAT
 int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
-                                       int uses_interp)
+                                       int uses_interp, bool x32)
 {
 #ifdef CONFIG_X86_X32_ABI
-	if (test_thread_flag(TIF_X32)) {
+	if (x32) {
 		if (!vdso64_enabled)
 			return 0;

 		return map_vdso_randomized(&vdso_image_x32);
arch/x86/entry/vsyscall/vsyscall_64.c (+1, −1) @@ -316,7 +316,7 @@ static struct vm_area_struct gate_vma __ro_after_init = {
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_COMPAT
-	if (!mm || mm->context.ia32_compat)
+	if (!mm || !(mm->context.flags & MM_CONTEXT_HAS_VSYSCALL))
 		return NULL;
 #endif
 	if (vsyscall_mode == NONE)
arch/x86/events/core.c (+1, −1) @@ -2602,7 +2602,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 	struct stack_frame_ia32 frame;
 	const struct stack_frame_ia32 __user *fp;

-	if (!test_thread_flag(TIF_IA32))
+	if (user_64bit_mode(regs))
 		return 0;

 	cs_base = get_segment_base(regs->cs);
arch/x86/events/intel/ds.c (+1, −1) @@ -1259,7 +1259,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
 		old_to = to;

 #ifdef CONFIG_X86_64
-		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+		is_64bit = kernel_ip(to) || any_64bit_mode(regs);
 #endif
 		insn_init(&insn, kaddr, size, is_64bit);
 		insn_get_length(&insn);
arch/x86/events/intel/lbr.c (+1, −1) @@ -1221,7 +1221,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
 	 * on 64-bit systems running 32-bit apps
 	 */
 #ifdef CONFIG_X86_64
-	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
+	is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
 #endif
 	insn_init(&insn, addr, bytes_read, is64);
 	insn_get_opcode(&insn);