arch/arm/mm/dma-mapping.c (+48 −1)

@@ -42,6 +42,31 @@
 #include "dma.h"
 #include "mm.h"
 
+struct arm_dma_buffer {
+	struct list_head list;
+	void *virt;
+};
+
+static LIST_HEAD(arm_dma_bufs);
+static DEFINE_SPINLOCK(arm_dma_bufs_lock);
+
+static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
+{
+	struct arm_dma_buffer *buf, *found = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
+	list_for_each_entry(buf, &arm_dma_bufs, list) {
+		if (buf->virt == virt) {
+			list_del(&buf->list);
+			found = buf;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
+
+	return found;
+}
+
 /*
  * The DMA API is built upon the notion of "buffer ownership".  A buffer
  * is either exclusively owned by the CPU (and therefore may be accessed
@@ -620,6 +645,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	struct page *page = NULL;
 	void *addr;
 	bool want_vaddr;
+	struct arm_dma_buffer *buf;
 
 #ifdef CONFIG_DMA_API_DEBUG
 	u64 limit = (mask + 1) & ~mask;
@@ -633,6 +659,10 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	if (!mask)
 		return NULL;
 
+	buf = kzalloc(sizeof(*buf), gfp);
+	if (!buf)
+		return NULL;
+
 	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
@@ -662,8 +692,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
 					    caller, want_vaddr);
 
-	if (page)
+	if (page) {
+		unsigned long flags;
+
 		*handle = pfn_to_dma(dev, page_to_pfn(page));
+		buf->virt = want_vaddr ? addr : page;
+
+		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
+		list_add(&buf->list, &arm_dma_bufs);
+		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
+	} else {
+		kfree(buf);
+	}
 
 	return want_vaddr ? addr : page;
 }
@@ -742,6 +782,11 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
+	struct arm_dma_buffer *buf;
+
+	buf = arm_dma_buffer_find(cpu_addr);
+	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
+		return;
 
 	size = PAGE_ALIGN(size);
 
@@ -760,6 +805,8 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		WARN_ON(irqs_disabled());
 		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
 	}
+
+	kfree(buf);
 }
 
 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
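
Summary of the change: __dma_alloc() now allocates a small struct arm_dma_buffer for every successful allocation and records the value handed back to the caller (the remapped virtual address, or the struct page when DMA_ATTR_NO_KERNEL_MAPPING is set) on the global arm_dma_bufs list under arm_dma_bufs_lock. __arm_dma_free() looks that entry up again via arm_dma_buffer_find(); if no entry matches, the pointer was never allocated (or was already freed), so the free is rejected with a WARN instead of being attempted.

Below is a minimal sketch, not part of the patch, of how this behaves from a driver's point of view on ARM with the standard DMA ops. The device pointer "dev" and the probe function are hypothetical; dma_alloc_coherent()/dma_free_coherent() are the usual entry points that reach __dma_alloc()/__arm_dma_free() here.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical driver probe, for illustration only. */
static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* __dma_alloc() records cpu_addr in arm_dma_bufs on success. */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* Matching free: arm_dma_buffer_find() removes the tracking entry. */
	dma_free_coherent(dev, SZ_4K, cpu_addr, handle);

	/*
	 * A double free, or a free of a pointer that was never returned by
	 * dma_alloc_coherent(), no longer finds a tracking entry, so
	 * __arm_dma_free() warns ("Freeing invalid buffer ...") and returns
	 * without touching the page or the mapping.
	 */
	return 0;
}

Note that the kzalloc() of the tracking structure uses the caller's gfp flags, so an atomic allocation stays atomic, and the structure is released again both when the underlying page allocation fails and after a successful free.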