arch/nios2/include/asm/io.h  +4 −16

@@ -25,29 +25,17 @@
 #define writew_relaxed(x, addr)	writew(x, addr)
 #define writel_relaxed(x, addr)	writel(x, addr)
 
-extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-			unsigned long cacheflag);
+void __iomem *ioremap(unsigned long physaddr, unsigned long size);
 extern void __iounmap(void __iomem *addr);
 
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					    unsigned long size)
-{
-	return __ioremap(physaddr, size, 0);
-}
-
 static inline void iounmap(void __iomem *addr)
 {
 	__iounmap(addr);
 }
 
-#define ioremap_nocache ioremap_nocache
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
+#define ioremap_nocache ioremap
+#define ioremap_wc ioremap
+#define ioremap_wt ioremap
 
 /* Pages to physical address... */
 #define page_to_phys(page)	virt_to_phys(page_to_virt(page))
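With the __ioremap() wrapper gone, ioremap_nocache(), ioremap_wc() and ioremap_wt() are now plain aliases for ioremap(), which on nios2 always hands back an uncached mapping. A minimal driver-style sketch of how the header is used after this change; the base address, size and register offsets below are made-up placeholders, not values taken from this patch:

#include <linux/io.h>

/* Hypothetical MMIO window: base, size and offsets are placeholder values. */
#define DEMO_PHYS_BASE	0x10000000UL
#define DEMO_SIZE	0x40UL

static int demo_probe(void)
{
	void __iomem *regs;
	u32 id;

	/* ioremap_nocache()/ioremap_wc()/ioremap_wt() now behave identically. */
	regs = ioremap(DEMO_PHYS_BASE, DEMO_SIZE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs);		/* read a register at offset 0 */
	writel(0x1, regs + 0x4);	/* write a register at a made-up offset */

	iounmap(regs);
	return id ? 0 : -ENODEV;
}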
arch/nios2/mm/ioremap.c  +3 −14

@@ -112,8 +112,7 @@ static int remap_area_pages(unsigned long address, unsigned long phys_addr,
 /*
  * Map some physical address range into the kernel address space.
  */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-		unsigned long cacheflag)
+void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
 {
 	struct vm_struct *area;
 	unsigned long offset;
@@ -139,15 +138,6 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	/*
-	 * Map uncached objects in the low part of address space to
-	 * CONFIG_NIOS2_IO_REGION_BASE
-	 */
-	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
-	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
-	    !(cacheflag & _PAGE_CACHED))
-		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
-
 	/* Mappings have to be page-aligned */
 	offset = phys_addr & ~PAGE_MASK;
 	phys_addr &= PAGE_MASK;
@@ -158,14 +148,13 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (!area)
 		return NULL;
 	addr = area->addr;
-	if (remap_area_pages((unsigned long) addr, phys_addr, size,
-			cacheflag)) {
+	if (remap_area_pages((unsigned long) addr, phys_addr, size, 0)) {
 		vunmap(addr);
 		return NULL;
 	}
 
 	return (void __iomem *) (offset + (char *)addr);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(ioremap);
 
 /*
  * __iounmap unmaps nearly everything, so be careful
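Since the cacheflag argument and the CONFIG_NIOS2_IO_REGION_BASE fast path are both gone, every mapping now goes through get_vm_area()/remap_area_pages() uncached, and there is no longer a way to ask this path for a cached mapping. A hedged sketch of the usual alternative for RAM-backed regions, the generic memremap()/memunmap() API; whether it suits any particular former __ioremap(..., _PAGE_CACHED) caller is an assumption, not something this patch addresses:

#include <linux/io.h>

/*
 * Sketch only: map a RAM-backed region write-back (cached) via the
 * generic memremap() helper instead of the removed cached-ioremap path.
 * demo_map_cached()/demo_unmap_cached() are hypothetical helper names.
 */
static void *demo_map_cached(phys_addr_t phys, size_t size)
{
	return memremap(phys, size, MEMREMAP_WB);	/* NULL on failure */
}

static void demo_unmap_cached(void *addr)
{
	memunmap(addr);
}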