arch/x86/mm/ioremap_32.c  (+48 −49)

@@ -1,6 +1,4 @@
 /*
- * arch/i386/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -21,10 +19,6 @@
 #define ISA_START_ADDRESS	0xa0000
 #define ISA_END_ADDRESS		0x100000
 
-/*
- * Generic mapping function (not visible outside):
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -34,7 +28,8 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
@@ -62,7 +57,8 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
 		t_addr = __va(phys_addr);
 		t_end = t_addr + (size - 1);
 
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+		for (page = virt_to_page(t_addr);
+		     page <= virt_to_page(t_end); page++)
 			if (!PageReserved(page))
 				return NULL;
 	}
@@ -114,11 +110,11 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr;
 	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
+
 	if (!p)
 		return p;
 
@@ -172,7 +168,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+			(PAGE_MASK & (unsigned long __force)addr);
 
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
@@ -187,7 +184,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
@@ -237,7 +234,7 @@ void __init early_ioremap_init(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk("early_ioremap_init()\n");
+		printk(KERN_DEBUG "early_ioremap_init()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -248,15 +245,16 @@ void __init early_ioremap_init(void)
 	 */
 	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		printk("pgd %p != %p\n",
+		printk(KERN_WARNING "pgd %p != %p\n",
 		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
 			fix_to_virt(FIX_BTMAP_BEGIN));
-		printk("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
 			fix_to_virt(FIX_BTMAP_END));
-		printk("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
-		printk("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
+		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
+			FIX_BTMAP_BEGIN);
 	}
 }
@@ -265,7 +263,7 @@ void __init early_ioremap_clear(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk("early_ioremap_clear()\n");
+		printk(KERN_DEBUG "early_ioremap_clear()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
@@ -351,7 +349,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 
 	nesting = early_ioremap_nested;
 	if (early_ioremap_debug) {
-		printk("early_ioremap(%08lx, %08lx) [%d] => ",
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
 		       phys_addr, size, nesting);
 		dump_stack();
 	}
@@ -413,7 +411,8 @@ void __init early_iounmap(void *addr, unsigned long size)
 	WARN_ON(nesting < 0);
 	if (early_ioremap_debug) {
-		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n",
+		       addr, size, nesting);
 		dump_stack();
 	}

arch/x86/mm/ioremap_64.c  (+22 −26)

@@ -1,6 +1,4 @@
 /*
- * arch/x86_64/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -33,8 +31,7 @@ EXPORT_SYMBOL(__phys_addr);
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 		    unsigned long flags)
 {
 	int err = 0;
@@ -50,20 +47,18 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 		if (!lookup_address(vaddr, &level))
 			return err;
 		/*
-		 * Must use a address here and not struct page because the phys addr
-		 * can be a in hole between nodes and not have an memmap entry.
+		 * Must use a address here and not struct page because
+		 * the phys addr can be a in hole between nodes and
+		 * not have an memmap entry.
 		 */
-		err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
+		err = change_page_attr_addr(vaddr,npages,
+					    MAKE_GLOBAL(__PAGE_KERNEL|flags));
 		if (!err)
 			global_flush_tlb();
 	}
 	return err;
 }
 
-/*
- * Generic mapping function
- */
-
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -73,7 +68,8 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
 	void *addr;
 	struct vm_struct *area;
@@ -142,7 +138,6 @@ EXPORT_SYMBOL(__ioremap);
  *
  * Must be freed with iounmap.
  */
-
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
@@ -165,7 +160,8 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+			(PAGE_MASK & (unsigned long __force)addr);
 
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
@@ -179,7 +175,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
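Addendum: the block comment kept above __ioremap() spells out the calling contract this patch leaves untouched — the returned cookie is an offset into a page-aligned mapping, should only be dereferenced through the mmio helpers, and must be released with iounmap(). A minimal sketch of that contract from a caller's side, assuming a hypothetical device; the base address, size, register offset, and function names are invented placeholders, not part of the patch:

	#include <linux/errno.h>
	#include <linux/io.h>

	#define MY_DEV_PHYS	0xfebf0000UL	/* hypothetical MMIO base */
	#define MY_DEV_SIZE	0x1000UL	/* one page of registers */

	static void __iomem *my_dev_regs;

	static int my_dev_map(void)
	{
		/* Map uncached; the cookie may be offset within a page. */
		my_dev_regs = ioremap_nocache(MY_DEV_PHYS, MY_DEV_SIZE);
		if (!my_dev_regs)
			return -ENOMEM;

		/* Access only via the mmio helpers, never by direct deref. */
		writel(0x1, my_dev_regs + 0x04);  /* hypothetical enable reg */

		return 0;
	}

	static void my_dev_unmap(void)
	{
		/* Every successful ioremap*() must be paired with iounmap(). */
		iounmap(my_dev_regs);
		my_dev_regs = NULL;
	}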
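Most of the remaining churn in both files adds explicit severity prefixes to printk() calls, which otherwise fall back to the default console loglevel. A short illustration of the convention the patch enforces — the messages here are made up; only the KERN_* usage mirrors the patch:

	#include <linux/kernel.h>

	static void log_levels_example(void *addr)
	{
		/* Debug trace: normally filtered from the console. */
		printk(KERN_DEBUG "entering example path\n");

		/* Unexpected but recoverable condition. */
		printk(KERN_WARNING "mapping mismatch, continuing\n");

		/* Hard failure the caller cannot ignore. */
		printk(KERN_ERR "bad address %p\n", addr);
	}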