arch/arm/Kconfig  +1 −0

@@ -2,6 +2,7 @@ config ARM
 	bool
 	default y
 	select HAVE_AOUT
+	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE
 	select HAVE_MEMBLOCK
 	select RTC_LIB

arch/arm/common/dmabounce.c  +8 −8

@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 
 	return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
 	return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
 	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 		unsigned long off, size_t sz, enum dma_data_direction dir)

arch/arm/include/asm/dma-mapping.h  +53 −12

@@ -5,6 +5,7 @@
 
 #include <linux/mm_types.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
 
 #include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
@@ -297,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
 		enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *, struct page *,
+extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
+extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 		enum dma_data_direction);
 
 /*
@@ -327,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
+static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	return virt_to_dma(dev, cpu_addr);
+}
+
+static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir)
+{
+	__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		handle & ~PAGE_MASK, size, dir);
+}
+#endif /* CONFIG_DMABOUNCE */
+
 /**
  * dma_map_single - map a single buffer for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -344,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
+	addr = __dma_map_single(dev, cpu_addr, size, dir);
+	debug_dma_map_page(dev, virt_to_page(cpu_addr),
+			(unsigned long)cpu_addr & ~PAGE_MASK, size,
+			dir, addr, true);
 
-	return virt_to_dma(dev, cpu_addr);
+	return addr;
 }
 
 /**
@@ -368,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t addr;
+
 	BUG_ON(!valid_dma_direction(dir));
 
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return addr;
 }
 
 /**
@@ -392,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, true);
+	__dma_unmap_single(dev, handle, size, dir);
 }
 
 /**
@@ -412,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
+	debug_dma_unmap_page(dev, handle, size, dir, false);
+	__dma_unmap_page(dev, handle, size, dir);
 }
-#endif /* CONFIG_DMABOUNCE */
 
 /**
  * dma_sync_single_range_for_cpu
@@ -441,6 +478,8 @@
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
 		return;
@@ -453,6 +492,8 @@
 {
 	BUG_ON(!valid_dma_direction(dir));
 
+	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+
 	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
 		return;

arch/arm/mm/dma-mapping.c  +21 −3

@@ -554,17 +554,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -585,8 +588,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
@@ -611,6 +616,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -635,5 +642,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);
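For reference, the effect on a caller: whether or not CONFIG_DMABOUNCE is in use, dma_map_single()/dma_unmap_single() now funnel through __dma_map_single()/__dma_unmap_single() and additionally report each mapping to the generic dma-debug core via debug_dma_map_page()/debug_dma_unmap_page(). Below is a minimal, hypothetical driver fragment (not part of this patch) showing the ordinary streaming-DMA call path that is now checked.

/*
 * Hypothetical example only: an ordinary streaming-DMA user.  With
 * CONFIG_DMA_API_DEBUG enabled, the map/unmap pair below is tracked by
 * dma-debug, which can report leaked mappings, double unmaps and
 * mismatched sizes or directions.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* cleans the buffer for the device and records the mapping with dma-debug */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and wait for completion ... */

	/* checks handle, size and direction against the recorded mapping */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}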
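With HAVE_DMA_API_DEBUG selected for ARM, the checking itself remains opt-in: the debug_dma_* hooks and the dma_debug_init() call in dma_debug_do_init() compile to no-op stubs unless the generic checker from lib/Kconfig.debug is enabled, in which case the fs_initcall preallocates 4096 tracking entries at boot. An illustrative configuration fragment (the DMA_API_DEBUG option itself is not part of this patch):

# Illustrative .config fragment, assuming the generic option from
# lib/Kconfig.debug; it becomes selectable on ARM because this patch adds
# "select HAVE_DMA_API_DEBUG" to arch/arm/Kconfig.
CONFIG_HAVE_DMA_API_DEBUG=y
CONFIG_DMA_API_DEBUG=y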