arch/arm/common/dmabounce.c  +18 −15

@@ -435,6 +435,7 @@ int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
+       struct scatterlist *s;
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -442,14 +443,13 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        BUG_ON(dir == DMA_NONE);

-       for (i = 0; i < nents; i++, sg++) {
-               struct page *page = sg_page(sg);
-               unsigned int offset = sg->offset;
-               unsigned int length = sg->length;
+       for_each_sg(sg, s, nents, i) {
+               struct page *page = sg_page(s);
+               unsigned int offset = s->offset;
+               unsigned int length = s->length;
                void *ptr = page_address(page) + offset;

-               sg->dma_address = map_single(dev, ptr, length, dir);
+               s->dma_address = map_single(dev, ptr, length, dir);
        }

        return nents;
@@ -459,6 +459,7 @@ void
 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
+       struct scatterlist *s;
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -466,9 +467,9 @@ dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
        BUG_ON(dir == DMA_NONE);

-       for (i = 0; i < nents; i++, sg++) {
-               dma_addr_t dma_addr = sg->dma_address;
-               unsigned int length = sg->length;
+       for_each_sg(sg, s, nents, i) {
+               dma_addr_t dma_addr = s->dma_address;
+               unsigned int length = s->length;

                unmap_single(dev, dma_addr, length, dir);
        }
@@ -502,6 +503,7 @@ void
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
+       struct scatterlist *s;
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -509,9 +511,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
        BUG_ON(dir == DMA_NONE);

-       for (i = 0; i < nents; i++, sg++) {
-               dma_addr_t dma_addr = sg->dma_address;
-               unsigned int length = sg->length;
+       for_each_sg(sg, s, nents, i) {
+               dma_addr_t dma_addr = s->dma_address;
+               unsigned int length = s->length;

                sync_single(dev, dma_addr, length, dir);
        }
@@ -521,6 +523,7 @@ void
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
 {
+       struct scatterlist *s;
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
@@ -528,9 +531,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
        BUG_ON(dir == DMA_NONE);

-       for (i = 0; i < nents; i++, sg++) {
-               dma_addr_t dma_addr = sg->dma_address;
-               unsigned int length = sg->length;
+       for_each_sg(sg, s, nents, i) {
+               dma_addr_t dma_addr = s->dma_address;
+               unsigned int length = s->length;

                sync_single(dev, dma_addr, length, dir);
        }
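Note: the hunks above all make the same mechanical change: the open-coded "for (i = 0; i < nents; i++, sg++)" walk is replaced with the for_each_sg() iterator, which advances with sg_next() and therefore also works when the scatterlist is chained rather than stored as one flat array. The following is a minimal sketch of the two iteration styles; it is illustrative only and not taken from the patch.

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void old_style_walk(struct scatterlist *sg, int nents)
{
        int i;

        /* Pointer arithmetic: breaks silently on chained scatterlists. */
        for (i = 0; i < nents; i++, sg++)
                pr_debug("entry %d: length=%u\n", i, sg->length);
}

static void new_style_walk(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        /* for_each_sg() follows sg_next(), so chained lists are handled. */
        for_each_sg(sg, s, nents, i)
                pr_debug("entry %d: length=%u\n", i, s->length);
}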
arch/arm/include/asm/dma-mapping.h  +5 −110

@@ -281,75 +281,6 @@ dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
        dma_unmap_single(dev, handle, size, dir);
 }

-/**
- * dma_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming
- * mode for DMA.  This is the scatter-gather version of the
- * above dma_map_single interface.  Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length.  They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for dma_map_single are
- * the same here.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
-{
-       int i;
-
-       for (i = 0; i < nents; i++, sg++) {
-               char *virt;
-
-               sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
-               virt = sg_virt(sg);
-               if (!arch_is_coherent())
-                       dma_cache_maint(virt, sg->length, dir);
-       }
-
-       return nents;
-}
-#else
-extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
-/**
- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Unmap a set of streaming mode DMA translations.
- * Again, CPU read rules concerning calls here are the same as for
- * dma_unmap_single() above.
- */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
-{
-       /* nothing to do */
-}
-#else
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
-#endif
-
 /**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -405,50 +336,14 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
 }

-/**
- * dma_sync_sg_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as dma_sync_single_for_* but for a scatter-gather list,
- * same rules and usage.
+/*
+ * The scatter list versions of the above methods.
  */
-#ifndef CONFIG_DMABOUNCE
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
-{
-       int i;
-
-       for (i = 0; i < nents; i++, sg++) {
-               char *virt = sg_virt(sg);
-               if (!arch_is_coherent())
-                       dma_cache_maint(virt, sg->length, dir);
-       }
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-               enum dma_data_direction dir)
-{
-       int i;
-
-       for (i = 0; i < nents; i++, sg++) {
-               char *virt = sg_virt(sg);
-               if (!arch_is_coherent())
-                       dma_cache_maint(virt, sg->length, dir);
-       }
-}
-#else
 extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
 extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
 extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
-#endif

 #ifdef CONFIG_DMABOUNCE
 /*
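Note: with the inline definitions removed from the header, dma_map_sg() and friends are now ordinary exported functions, provided either by dmabounce.c or by dma-mapping.c below, and drivers call them exactly as before. A hedged usage sketch follows; my_start_dma() and my_hw_queue_desc() are hypothetical driver helpers, while everything else is the standard DMA API of this kernel era.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical hardware helper, stubbed out for the sketch. */
static void my_hw_queue_desc(dma_addr_t addr, unsigned int len)
{
}

static int my_start_dma(struct device *dev, struct scatterlist *sgl, int nents)
{
        struct scatterlist *s;
        int i, count;

        count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
        if (count == 0)
                return -ENOMEM;

        /* Program one hardware descriptor per mapped segment. */
        for_each_sg(sgl, s, count, i)
                my_hw_queue_desc(sg_dma_address(s), sg_dma_len(s));

        /* ... start the transfer and wait for completion ... */

        /* Unmap with the original nents, not the mapped count. */
        dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
        return 0;
}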
arch/arm/mm/dma-mapping.c  +92 −0

@@ -512,3 +512,95 @@ void dma_cache_maint(const void *start, size_t size, int direction)
        }
 }
 EXPORT_SYMBOL(dma_cache_maint);
+
+#ifndef CONFIG_DMABOUNCE
+/**
+ * dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the dma_map_single interface.
+ * Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}.
+ *
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               s->dma_address = page_to_dma(dev, sg_page(s)) + s->offset;
+               if (!arch_is_coherent())
+                       dma_cache_maint(sg_virt(s), s->length, dir);
+       }
+
+       return nents;
+}
+EXPORT_SYMBOL(dma_map_sg);
+
+/**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir)
+{
+       /* nothing to do */
+}
+EXPORT_SYMBOL(dma_unmap_sg);
+
+/**
+ * dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (!arch_is_coherent())
+                       dma_cache_maint(sg_virt(s), s->length, dir);
+       }
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+/**
+ * dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
+               enum dma_data_direction dir)
+{
+       struct scatterlist *s;
+       int i;
+
+       for_each_sg(sg, s, nents, i) {
+               if (!arch_is_coherent())
+                       dma_cache_maint(sg_virt(s), s->length, dir);
+       }
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+#endif
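Note: the sync variants exist so a mapping can stay in place across several transfers; each pair of calls moves ownership of the buffers between CPU and device, which on non-coherent ARM systems ends up in dma_cache_maint(). Below is a small sketch of that pattern, assuming a hypothetical driver with a long-lived DMA_FROM_DEVICE mapping; it is illustrative only and not part of the patch.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void my_process_rx(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
        /* Device finished writing: hand the buffers to the CPU. */
        dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

        /* ... CPU reads the received data here ... */

        /* Hand the buffers back to the device for the next transfer. */
        dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}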