Commit faf4ef82 authored by Christoph Hellwig

dma-direct: add support for dma_coherent_default_memory



Add an option to allocate uncached memory for dma_alloc_coherent from
the global dma_coherent_default_memory.  This will allow arm-nommu
(and eventually other platforms) to move to the generic code for
allocating uncached memory from a pre-populated pool.

Note that, for now, this is a separate pool from the one that platforms
capable of remapping at runtime use for GFP_ATOMIC allocations, although
there may be opportunities to eventually share a common codebase for the
two use cases.
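
To make the effect concrete, here is a minimal, hypothetical
consumer-side sketch (the driver name, probe function, and buffer size
are made up for illustration and are not part of this patch): on a
kernel with CONFIG_DMA_GLOBAL_POOL enabled, a plain dma_alloc_coherent()
call from a driver for a non-coherent device is now satisfied by
dma_alloc_from_global_coherent(), i.e. from the pre-populated
dma_coherent_default_memory pool, instead of an arch-specific uncached
allocation:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int foo_probe(struct platform_device *pdev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/*
	 * With CONFIG_DMA_GLOBAL_POOL and a non-coherent device, this
	 * allocation is served from the global coherent pool.
	 */
	cpu_addr = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma_handle,
				      GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle ... */

	/* Freeing goes back through dma_release_from_global_coherent(). */
	dma_free_coherent(&pdev->dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name = "foo-global-pool-example",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

How the pool itself gets populated (e.g. by the platform's
reserved-memory setup) is a separate, platform-specific step not shown
here.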

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Dillon Min <dillon.minfei@gmail.com>
parent 2a047e06
kernel/dma/Kconfig  +4 −0
@@ -93,6 +93,10 @@ config DMA_COHERENT_POOL
 	select GENERIC_ALLOCATOR
 	bool
 
+config DMA_GLOBAL_POOL
+	select DMA_DECLARE_COHERENT
+	bool
+
 config DMA_REMAP
 	bool
 	depends on MMU
kernel/dma/direct.c  +15 −0
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,

 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev))
+		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,

 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev)) {
+		if (!dma_release_from_global_coherent(page_order, cpu_addr))
+			WARN_ON_ONCE(1);
+		return;
+	}
+
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,

 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
 
 	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
 		return -ENXIO;