arch/x86/Kconfig.debug  +0 −2

@@ -305,8 +305,6 @@ config DEBUG_ENTRY
 	  Some of these sanity checks may slow down kernel entries and
 	  exits or otherwise impact performance.
-
-	  This is currently used to help test NMI code.

 	  If unsure, say N.

 config DEBUG_NMI_SELFTEST


arch/x86/entry/Makefile  +0 −1

@@ -2,7 +2,6 @@
 #
 # Makefile for the x86 low level entry code
 #
-OBJECT_FILES_NON_STANDARD_entry_$(BITS).o   := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y

 CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)


arch/x86/entry/calling.h  +5 −0

 #include <linux/jump_label.h>
+#include <asm/unwind_hints.h>

 /*
@@ -112,6 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq %rdx, 12*8+\offset(%rsp)
 	movq %rsi, 13*8+\offset(%rsp)
 	movq %rdi, 14*8+\offset(%rsp)
+	UNWIND_HINT_REGS offset=\offset extra=0
 	.endm
 	.macro SAVE_C_REGS offset=0
 		SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -136,6 +138,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq %r12, 3*8+\offset(%rsp)
 	movq %rbp, 4*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
+	UNWIND_HINT_REGS offset=\offset
 	.endm

 	.macro RESTORE_EXTRA_REGS offset=0
@@ -145,6 +148,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq 3*8+\offset(%rsp), %r12
 	movq 4*8+\offset(%rsp), %rbp
 	movq 5*8+\offset(%rsp), %rbx
+	UNWIND_HINT_REGS offset=\offset extra=0
 	.endm

 	.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
@@ -167,6 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	movq 13*8(%rsp), %rsi
 	movq 14*8(%rsp), %rdi
+	UNWIND_HINT_IRET_REGS offset=16*8
 	.endm

 	.macro RESTORE_C_REGS
 		RESTORE_C_REGS_HELPER 1,1,1,1,1
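A note on what these annotations do: each UNWIND_HINT_* use emits a small record into a discarded ELF section (.discard.unwind_hints) that objtool consumes at build time when generating its unwind tables; nothing lands in the running kernel's text. Roughly, each record looks like the C sketch below (field layout modeled on the ORC unwinder series; treat it as an approximation, not the exact kernel definition):

    #include <stdint.h>

    /* One record per UNWIND_HINT_* annotation; read back by objtool. */
    struct unwind_hint {
            uint32_t ip;        /* address of the annotated instruction */
            int16_t  sp_offset; /* where the frame sits relative to sp_reg */
            uint8_t  sp_reg;    /* base register for the frame (usually SP) */
            uint8_t  type;      /* REGS, IRET_REGS, FUNC, EMPTY, ... */
    };

So UNWIND_HINT_REGS offset=\offset asserts that a pt_regs layout begins \offset bytes above the current RSP, and extra=0 qualifies that only the iret frame and the caller-saved slots are valid, since rbp/rbx/r12-r15 have not been saved at that point.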
arch/x86/entry/entry_64.S  +140 −30

@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>

 .code64
@@ -43,9 +44,10 @@

 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
+	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
-ENDPROC(native_usergs_sysret64)
+END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */

 .macro TRACE_IRQS_IRETQ
@@ -134,6 +136,7 @@ ENDPROC(native_usergs_sysret64)
  */

 ENTRY(entry_SYSCALL_64)
+	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -169,6 +172,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
 	pushq	%r10				/* pt_regs->r10 */
 	pushq	%r11				/* pt_regs->r11 */
 	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
+	UNWIND_HINT_REGS extra=0

 	/*
 	 * If we need to do entry work or if we guess we'll need to do
@@ -223,6 +227,7 @@ entry_SYSCALL_64_fastpath:
 	movq	EFLAGS(%rsp), %r11
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq	RSP(%rsp), %rsp
+	UNWIND_HINT_EMPTY
 	USERGS_SYSRET64

 1:
@@ -316,6 +321,7 @@ syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq	RSP(%rsp), %rsp
+	UNWIND_HINT_EMPTY
 	USERGS_SYSRET64

 opportunistic_sysret_failed:
@@ -343,6 +349,7 @@ ENTRY(stub_ptregs_64)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
+	UNWIND_HINT_REGS extra=0
 	jmp	entry_SYSCALL64_slow_path

 1:
@@ -351,6 +358,7 @@ END(stub_ptregs_64)

 .macro ptregs_stub func
 ENTRY(ptregs_\func)
+	UNWIND_HINT_FUNC
 	leaq	\func(%rip), %rax
 	jmp	stub_ptregs_64
 END(ptregs_\func)
@@ -367,6 +375,7 @@ END(ptregs_\func)
  * %rsi: next task
  */
 ENTRY(__switch_to_asm)
+	UNWIND_HINT_FUNC
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in inactive_task_frame
@@ -406,6 +415,7 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
+	UNWIND_HINT_EMPTY
 	movq	%rax, %rdi
 	call	schedule_tail			/* rdi: 'prev' task parameter */

@@ -413,6 +423,7 @@ ENTRY(ret_from_fork)
 	jnz	1f				/* kernel threads are uncommon */

 2:
+	UNWIND_HINT_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath		/* returns with IRQs disabled */
 	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
@@ -440,13 +451,102 @@ END(ret_from_fork)
 ENTRY(irq_entries_start)
     vector=FIRST_EXTERNAL_VECTOR
     .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+	UNWIND_HINT_IRET_REGS
 	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
-    vector=vector+1
 	jmp	common_interrupt
 	.align	8
+    vector=vector+1
     .endr
 END(irq_entries_start)

+.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
+#ifdef CONFIG_DEBUG_ENTRY
+	pushfq
+	testl	$X86_EFLAGS_IF, (%rsp)
+	jz	.Lokay_\@
+	ud2
+.Lokay_\@:
+	addq	$8, %rsp
+#endif
+.endm
+
+/*
+ * Enters the IRQ stack if we're not already using it.  NMI-safe.  Clobbers
+ * flags and puts old RSP into old_rsp, and leaves all other GPRs alone.
+ * Requires kernel GSBASE.
+ *
+ * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
+ */
+.macro ENTER_IRQ_STACK regs=1 old_rsp
+	DEBUG_ENTRY_ASSERT_IRQS_OFF
+	movq	%rsp, \old_rsp
+
+	.if \regs
+	UNWIND_HINT_REGS base=\old_rsp
+	.endif
+
+	incl	PER_CPU_VAR(irq_count)
+	jnz	.Lirq_stack_push_old_rsp_\@
+
+	/*
+	 * Right now, if we just incremented irq_count to zero, we've
+	 * claimed the IRQ stack but we haven't switched to it yet.
+	 *
+	 * If anything is added that can interrupt us here without using IST,
+	 * it must be *extremely* careful to limit its stack usage.  This
+	 * could include kprobes and a hypothetical future IST-less #DB
+	 * handler.
+	 *
+	 * The OOPS unwinder relies on the word at the top of the IRQ
+	 * stack linking back to the previous RSP for the entire time we're
+	 * on the IRQ stack.  For this to work reliably, we need to write
+	 * it before we actually move ourselves to the IRQ stack.
+	 */
+	movq	\old_rsp, PER_CPU_VAR(irq_stack_union + IRQ_STACK_SIZE - 8)
+	movq	PER_CPU_VAR(irq_stack_ptr), %rsp
+
+#ifdef CONFIG_DEBUG_ENTRY
+	/*
+	 * If the first movq above becomes wrong due to IRQ stack layout
+	 * changes, the only way we'll notice is if we try to unwind right
+	 * here.  Assert that we set up the stack right to catch this type
+	 * of bug quickly.
+	 */
+	cmpq	-8(%rsp), \old_rsp
+	je	.Lirq_stack_okay\@
+	ud2
+	.Lirq_stack_okay\@:
+#endif
+
+.Lirq_stack_push_old_rsp_\@:
+	pushq	\old_rsp
+
+	.if \regs
+	UNWIND_HINT_REGS indirect=1
+	.endif
+.endm
+
+/*
+ * Undoes ENTER_IRQ_STACK.
+ */
+.macro LEAVE_IRQ_STACK regs=1
+	DEBUG_ENTRY_ASSERT_IRQS_OFF
+	/* We need to be off the IRQ stack before decrementing irq_count. */
+	popq	%rsp
+
+	.if \regs
+	UNWIND_HINT_REGS
+	.endif
+
+	/*
+	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
+	 * the irq stack but we're not on it.
+	 */
+	decl	PER_CPU_VAR(irq_count)
+.endm
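The ENTER_IRQ_STACK/LEAVE_IRQ_STACK bookkeeping is easier to follow in C. In the sketch below, set_rsp(), stack_push(), stack_pop_rsp() and irq_stack_top are invented for illustration; the real sequence has to stay in assembly because it rewrites RSP itself:

    extern long irq_count;              /* per-CPU; starts at -1 */
    extern unsigned long irq_stack_top; /* address one past the IRQ stack */
    extern void set_rsp(unsigned long rsp);
    extern void stack_push(unsigned long val);
    extern void stack_pop_rsp(void);

    void enter_irq_stack(unsigned long old_rsp)
    {
            /* irq_count >= 0 means the IRQ stack is claimed, even during
             * the window before RSP has actually moved. */
            if (++irq_count == 0) {
                    /*
                     * First entry: publish the backlink to the old stack
                     * *before* switching.  The OOPS unwinder reads the word
                     * at the top of the IRQ stack to find the previous RSP,
                     * so it must be valid the whole time we're on this stack.
                     */
                    *(unsigned long *)(irq_stack_top - 8) = old_rsp;
                    set_rsp(irq_stack_top);
            }
            stack_push(old_rsp);    /* LEAVE_IRQ_STACK pops this into RSP */
    }

    void leave_irq_stack(void)
    {
            stack_pop_rsp();        /* step off the IRQ stack first... */
            --irq_count;            /* ...then release the claim */
    }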
 /*
  * Interrupt entry/exit.
  *
@@ -485,17 +585,7 @@ END(irq_entries_start)
 	CALL_enter_from_user_mode

 1:
-	/*
-	 * Save previous stack pointer, optionally switch to interrupt stack.
-	 * irq_count is used to check if a CPU is already on an interrupt stack
-	 * or not. While this is essentially redundant with preempt_count it is
-	 * a little cheaper to use a separate counter in the PDA (short of
-	 * moving irq_enter into assembly, which would be too much work)
-	 */
-	movq	%rsp, %rdi
-	incl	PER_CPU_VAR(irq_count)
-	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
-	pushq	%rdi
+	ENTER_IRQ_STACK old_rsp=%rdi
 	/* We entered an interrupt context - irqs are off: */
 	TRACE_IRQS_OFF
@@ -515,10 +605,8 @@ common_interrupt:
 ret_from_intr:
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	decl	PER_CPU_VAR(irq_count)
-	/* Restore saved previous stack */
-	popq	%rsp
+	LEAVE_IRQ_STACK

 	testb	$3, CS(%rsp)
 	jz	retint_kernel
@@ -561,6 +649,7 @@ restore_c_regs_and_iret:
 	INTERRUPT_RETURN

 ENTRY(native_iret)
+	UNWIND_HINT_IRET_REGS
 	/*
 	 * Are we returning to a stack segment from the LDT? Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
@@ -633,6 +722,7 @@ native_irq_return_ldt:
 	orq	PER_CPU_VAR(espfix_stack), %rax
 	SWAPGS
 	movq	%rax, %rsp
+	UNWIND_HINT_IRET_REGS offset=8

 	/*
 	 * At this point, we cannot write to the stack any more, but we can
@@ -654,6 +744,7 @@ END(common_interrupt)
  */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
+	UNWIND_HINT_IRET_REGS
 	ASM_CLAC
 	pushq	$~(\num)
 .Lcommon_\sym:
@@ -740,6 +831,8 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt

 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
+	UNWIND_HINT_IRET_REGS offset=8
+
 	/* Sanity check */
 	.if \shift_ist != -1 && \paranoid == 0
 	.error "using shift_ist requires paranoid=1"
@@ -763,6 +856,7 @@ ENTRY(\sym)
 	.else
 	call	error_entry
 	.endif
+	UNWIND_HINT_REGS
 	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

 	.if \paranoid
@@ -860,6 +954,7 @@ idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0
  * edi: new selector
  */
 ENTRY(native_load_gs_index)
+	FRAME_BEGIN
 	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
@@ -868,8 +963,9 @@ ENTRY(native_load_gs_index)
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	SWAPGS
 	popfq
+	FRAME_END
 	ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)

 _ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -892,14 +988,12 @@ bad_gs:
 ENTRY(do_softirq_own_stack)
 	pushq	%rbp
 	mov	%rsp, %rbp
-	incl	PER_CPU_VAR(irq_count)
-	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
-	push	%rbp				/* frame pointer backlink */
+	ENTER_IRQ_STACK regs=0 old_rsp=%r11
 	call	__do_softirq
+	LEAVE_IRQ_STACK regs=0
 	leaveq
-	decl	PER_CPU_VAR(irq_count)
 	ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)

 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
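All the ENTER_IRQ_STACK/LEAVE_IRQ_STACK conversions above begin with DEBUG_ENTRY_ASSERT_IRQS_OFF, whose pushfq/testl/ud2 sequence is simply an interrupts-off assertion. The same check written as a C sketch (the asm version traps via ud2 rather than calling BUG()):

    #include <asm/irqflags.h>           /* native_save_fl() */
    #include <asm/processor-flags.h>    /* X86_EFLAGS_IF */
    #include <linux/bug.h>

    static inline void debug_entry_assert_irqs_off(void)
    {
    #ifdef CONFIG_DEBUG_ENTRY
            /* Sample EFLAGS; if IF is still set, the caller entered the
             * IRQ-stack accounting with interrupts enabled -- a bug. */
            if (native_save_fl() & X86_EFLAGS_IF)
                    BUG();
    #endif
    }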
@@ -923,14 +1017,14 @@ ENTRY(xen_do_hypervisor_callback)	/* do_hypervisor_callback(struct *pt_regs) */
  * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
  * see the correct pointer to the pt_regs
  */
+	UNWIND_HINT_FUNC
 	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
-11:	incl	PER_CPU_VAR(irq_count)
-	movq	%rsp, %rbp
-	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
-	pushq	%rbp				/* frame pointer backlink */
+	UNWIND_HINT_REGS
+11:	ENTER_IRQ_STACK old_rsp=%r10
 	call	xen_evtchn_do_upcall
-	popq	%rsp
-	decl	PER_CPU_VAR(irq_count)
+	LEAVE_IRQ_STACK
 #ifndef CONFIG_PREEMPT
 	call	xen_maybe_preempt_hcall
 #endif
@@ -951,6 +1045,7 @@ END(xen_do_hypervisor_callback)
  * with its current contents: any discrepancy means we in category 1.
  */
 ENTRY(xen_failsafe_callback)
+	UNWIND_HINT_EMPTY
 	movl	%ds, %ecx
 	cmpw	%cx, 0x10(%rsp)
 	jne	1f
@@ -970,11 +1065,13 @@ ENTRY(xen_failsafe_callback)
 	pushq	$0				/* RIP */
 	pushq	%r11
 	pushq	%rcx
+	UNWIND_HINT_IRET_REGS offset=8
 	jmp	general_protection
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 	movq	(%rsp), %rcx
 	movq	8(%rsp), %r11
 	addq	$0x30, %rsp
+	UNWIND_HINT_IRET_REGS
 	pushq	$-1 /* orig_ax = -1 => not a system call */
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
@@ -1020,6 +1117,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
  * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
  */
 ENTRY(paranoid_entry)
+	UNWIND_HINT_FUNC
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1047,6 +1145,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
+	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
@@ -1068,6 +1167,7 @@ END(paranoid_exit)
  * Return: EBX=0: came from user mode; EBX=1: otherwise
  */
 ENTRY(error_entry)
+	UNWIND_HINT_FUNC
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1152,6 +1252,7 @@ END(error_entry)
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
+	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	testl	%ebx, %ebx
@@ -1161,6 +1262,7 @@ END(error_exit)

 /* Runs on exception stack */
 ENTRY(nmi)
+	UNWIND_HINT_IRET_REGS
 	/*
 	 * Fix up the exception frame if we're on Xen.
 	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
@@ -1232,11 +1334,13 @@ ENTRY(nmi)
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	UNWIND_HINT_IRET_REGS base=%rdx offset=8
 	pushq	5*8(%rdx)	/* pt_regs->ss */
 	pushq	4*8(%rdx)	/* pt_regs->rsp */
 	pushq	3*8(%rdx)	/* pt_regs->flags */
 	pushq	2*8(%rdx)	/* pt_regs->cs */
 	pushq	1*8(%rdx)	/* pt_regs->rip */
+	UNWIND_HINT_IRET_REGS
 	pushq	$-1		/* pt_regs->orig_ax */
 	pushq	%rdi		/* pt_regs->di */
 	pushq	%rsi		/* pt_regs->si */
@@ -1253,6 +1357,7 @@ ENTRY(nmi)
 	pushq	%r13		/* pt_regs->r13 */
 	pushq	%r14		/* pt_regs->r14 */
 	pushq	%r15		/* pt_regs->r15 */
+	UNWIND_HINT_REGS
 	ENCODE_FRAME_POINTER

 	/*
@@ -1407,6 +1512,7 @@ first_nmi:
 	.rept 5
 	pushq	11*8(%rsp)
 	.endr
+	UNWIND_HINT_IRET_REGS

 	/* Everything up to here is safe from nested NMIs */

@@ -1422,6 +1528,7 @@ first_nmi:
 	pushq	$__KERNEL_CS	/* CS */
 	pushq	$1f		/* RIP */
 	INTERRUPT_RETURN	/* continues at repeat_nmi below */
+	UNWIND_HINT_IRET_REGS
 1:
 #endif

@@ -1471,6 +1578,7 @@ end_repeat_nmi:
 	 * exceptions might do.
 	 */
 	call	paranoid_entry
+	UNWIND_HINT_REGS

 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 	movq	%rsp, %rdi
@@ -1508,17 +1616,19 @@ nmi_restore:
 END(nmi)

 ENTRY(ignore_sysret)
+	UNWIND_HINT_EMPTY
 	mov	$-ENOSYS, %eax
 	sysret
 END(ignore_sysret)

 ENTRY(rewind_stack_do_exit)
+	UNWIND_HINT_FUNC
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl	%ebp, %ebp

 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rax
-	leaq	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+	leaq	-PTREGS_SIZE(%rax), %rsp
+	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

 	call	do_exit
-1:	jmp	1b
 END(rewind_stack_do_exit)
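Many of the new hints are UNWIND_HINT_IRET_REGS, which tells objtool that the stack currently holds only a hardware exception frame rather than a full pt_regs. That frame, pushed by the CPU on a 64-bit trap and rebuilt word for word by the five pushq instructions in the NMI stack-switch above, looks like this (it matches the tail of struct pt_regs; the CPU pushes SS first, so RIP ends up at the lowest address):

    /* The five-word hardware frame described by UNWIND_HINT_IRET_REGS,
     * lowest address first. */
    struct iret_frame {
            unsigned long ip;       /* RIP    */
            unsigned long cs;       /* CS     */
            unsigned long flags;    /* RFLAGS */
            unsigned long sp;       /* RSP    */
            unsigned long ss;       /* SS     */
    };

The base=%rdx offset=8 variant keeps the hint accurate mid-switch: RSP has already been repointed at the kernel stack, but the live iret frame still sits 8 bytes above %rdx.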
arch/x86/include/asm/io.h  +47 −51

@@ -69,6 +69,9 @@ build_mmio_write(__writeb, "b", unsigned char, "q", )
 build_mmio_write(__writew, "w", unsigned short, "r", )
 build_mmio_write(__writel, "l", unsigned int, "r", )

+#define readb readb
+#define readw readw
+#define readl readl
 #define readb_relaxed(a) __readb(a)
 #define readw_relaxed(a) __readw(a)
 #define readl_relaxed(a) __readl(a)
@@ -76,6 +79,9 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
 #define __raw_readw __readw
 #define __raw_readl __readl

+#define writeb writeb
+#define writew writew
+#define writel writel
 #define writeb_relaxed(v, a) __writeb(v, a)
 #define writew_relaxed(v, a) __writew(v, a)
 #define writel_relaxed(v, a) __writel(v, a)
@@ -88,13 +94,15 @@ build_mmio_write(__writel, "l", unsigned int, "r", )

 #ifdef CONFIG_X86_64

 build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
+build_mmio_read(__readq, "q", unsigned long, "=r", )
 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
+build_mmio_write(__writeq, "q", unsigned long, "r", )

-#define readq_relaxed(a)	readq(a)
-#define writeq_relaxed(v, a)	writeq(v, a)
+#define readq_relaxed(a)	__readq(a)
+#define writeq_relaxed(v, a)	__writeq(v, a)

-#define __raw_readq(a)		readq(a)
-#define __raw_writeq(val, addr)	writeq(val, addr)
+#define __raw_readq		__readq
+#define __raw_writeq		__writeq

 /* Let people know that we have them */
 #define readq readq
@@ -119,6 +127,7 @@ static inline phys_addr_t virt_to_phys(volatile void *address)
 {
 	return __pa(address);
 }
+#define virt_to_phys virt_to_phys

 /**
  * phys_to_virt - map physical address to virtual
@@ -137,6 +146,7 @@ static inline void *phys_to_virt(phys_addr_t address)
 {
 	return __va(address);
 }
+#define phys_to_virt phys_to_virt
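These self-referential defines look odd in isolation, but they are the switch that the upcoming #include <asm-generic/io.h> keys off: the generic header only supplies a fallback for a name that is not already defined as a macro. In miniature, the convention works like this (a sketch of the pattern, not the verbatim headers):

    /* Arch header: provide the accessor, then claim the name ... */
    static inline unsigned char readb(const volatile void __iomem *addr)
    {
            return *(volatile unsigned char __force *)addr; /* simplified */
    }
    #define readb readb

    /* ... so that asm-generic/io.h skips its generic fallback: */
    #ifndef readb
    #define readb readb
    static inline unsigned char readb(const volatile void __iomem *addr)
    {
            return __raw_readb(addr);
    }
    #endif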
 /*
  * Change "struct page" to physical address.
@@ -169,11 +179,14 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * else, you probably want one of the following.
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+#define ioremap_nocache ioremap_nocache
 extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
+#define ioremap_uc ioremap_uc
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+#define ioremap_cache ioremap_cache
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
+#define ioremap_prot ioremap_prot

 /**
  * ioremap - map bus memory into CPU space
@@ -193,8 +206,10 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }
+#define ioremap ioremap

 extern void iounmap(volatile void __iomem *addr);
+#define iounmap iounmap

 extern void set_iounmap_nonlazy(void);
@@ -202,53 +217,6 @@ extern void set_iounmap_nonlazy(void);

 #include <asm-generic/iomap.h>

-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
-
-/**
- * memset_io	Set a range of I/O memory to a constant value
- * @addr:	The beginning of the I/O-memory range to set
- * @val:	The value to set the memory to
- * @count:	The number of bytes to set
- *
- * Set a range of I/O memory to a given value.
- */
-static inline void
-memset_io(volatile void __iomem *addr, unsigned char val, size_t count)
-{
-	memset((void __force *)addr, val, count);
-}
-
-/**
- * memcpy_fromio	Copy a block of data from I/O memory
- * @dst:		The (RAM) destination for the copy
- * @src:		The (I/O memory) source for the data
- * @count:		The number of bytes to copy
- *
- * Copy a block of data from I/O memory.
- */
-static inline void
-memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count)
-{
-	memcpy(dst, (const void __force *)src, count);
-}
-
-/**
- * memcpy_toio		Copy a block of data into I/O memory
- * @dst:		The (I/O memory) destination for the copy
- * @src:		The (RAM) source for the data
- * @count:		The number of bytes to copy
- *
- * Copy a block of data to I/O memory.
- */
-static inline void
-memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
-{
-	memcpy((void __force *)dst, src, count);
-}
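Dropping these helpers is safe because asm-generic/io.h carries equivalent fallback definitions under the same #ifndef convention, so existing callers compile and behave the same. An illustrative (made-up) driver snippet that is unaffected by the switch:

    static void snapshot_and_clear_regs(resource_size_t phys_base)
    {
            void __iomem *regs = ioremap(phys_base, 0x1000);
            static u32 snapshot[0x1000 / sizeof(u32)];

            if (!regs)
                    return;

            /* Both helpers now come from asm-generic/io.h: */
            memcpy_fromio(snapshot, regs, sizeof(snapshot));
            memset_io(regs, 0, 0x1000);

            iounmap(regs);
    }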
 /*
  * ISA space is 'always mapped' on a typical x86 system, no need to
  * explicitly ioremap() it. The fact that the ISA IO space is mapped
@@ -341,13 +309,38 @@ BUILDIO(b, b, char)
 BUILDIO(w, w, short)
 BUILDIO(l, , int)

+#define inb inb
+#define inw inw
+#define inl inl
+#define inb_p inb_p
+#define inw_p inw_p
+#define inl_p inl_p
+#define insb insb
+#define insw insw
+#define insl insl
+#define outb outb
+#define outw outw
+#define outl outl
+#define outb_p outb_p
+#define outw_p outw_p
+#define outl_p outl_p
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
+
 extern void *xlate_dev_mem_ptr(phys_addr_t phys);
 extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
+#define xlate_dev_mem_ptr xlate_dev_mem_ptr
+#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr

 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
+#define ioremap_wc ioremap_wc
 extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
+#define ioremap_wt ioremap_wt

 extern bool is_early_ioremap_ptep(pte_t *ptep);
@@ -365,6 +358,9 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,

 #define IO_SPACE_LIMIT 0xffff

+#include <asm-generic/io.h>
+#undef PCI_IOBASE
+
 #ifdef CONFIG_MTRR
 extern int __must_check arch_phys_wc_index(int handle);
 #define arch_phys_wc_index arch_phys_wc_index
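This last hunk is the payoff of the io.h change: x86 now includes asm-generic/io.h and inherits generic definitions for everything it did not claim above. The trailing #undef PCI_IOBASE matters because the generic header models port I/O as loads and stores through a PCI_IOBASE memory window and defines a default for it; x86 has real port instructions instead, so the default is undefined again to keep code from assuming an MMIO-mapped I/O port space. For comparison, the BUILDIO-generated x86 inb() is essentially (simplified sketch):

    static inline unsigned char inb(unsigned short port)
    {
            unsigned char value;

            /* True port I/O -- no PCI_IOBASE window involved. */
            asm volatile("inb %w1, %b0" : "=a" (value) : "Nd" (port));
            return value;
    }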