Commit 198c50e2 authored by Christoph Hellwig
Browse files

dma-mapping: refactor dma_{alloc,free}_pages



Factor out internal versions without the dma_debug calls in preparation
for callers that will need different dma_debug calls.

Note that this changes the dma_debug calls to receive the non-page-aligned
size values, but as long as alloc and free agree on one variant we are
fine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tomasz Figa <tfiga@chromium.org>
Tested-by: Ricardo Ribalda <ribalda@chromium.org>
parent eedb0b12
Loading
Loading
Loading
Loading
+19 −10
Original line number Diff line number Diff line
@@ -477,11 +477,10 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
}
EXPORT_SYMBOL(dma_free_attrs);

struct page *dma_alloc_pages(struct device *dev, size_t size,
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
@@ -490,31 +489,41 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	else if (ops->alloc_pages)
		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
	else
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
/*
 * dma_alloc_pages - allocate non-coherent pages and report them to dma-debug.
 *
 * Exported wrapper around __dma_alloc_pages(): the allocation itself (and
 * the PAGE_ALIGN of @size) happens in the internal helper; this wrapper only
 * adds the debug_dma_map_page() bookkeeping, and only when the allocation
 * actually succeeded.
 *
 * NOTE(review): @size here is the caller's original, not-page-aligned value,
 * so dma-debug now records the unaligned size — per the commit message this
 * is fine as long as dma_free_pages() reports the same variant.
 */
struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	/* Only record the mapping in dma-debug when allocation succeeded. */
	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

void dma_free_pages(struct device *dev, size_t size, struct page *page,
/*
 * __dma_free_pages - free pages obtained via __dma_alloc_pages().
 *
 * Internal helper deliberately without dma-debug reporting, so that future
 * callers can do their own debug accounting; the exported dma_free_pages()
 * adds the debug_dma_unmap_page() call.
 *
 * NOTE(review): this span is a diff view with the +/- markers stripped; the
 * debug_dma_unmap_page() call below is the *removed* (pre-refactor) line —
 * in the committed helper that call lives in dma_free_pages() instead.
 */
static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* Free with the page-aligned size, matching the allocation side. */
	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);

	/* Direct mapping first, then the ops method if one is provided. */
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

/*
 * dma_free_pages - exported counterpart of dma_alloc_pages().
 *
 * Reports the unmap to dma-debug with the caller's original (not
 * page-aligned) @size — the same variant dma_alloc_pages() recorded — then
 * delegates the actual freeing to __dma_free_pages(), which page-aligns
 * the size itself.
 */
void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,