Commit f5ff79fd authored by Christoph Hellwig
Browse files

dma-mapping: remove CONFIG_DMA_REMAP



CONFIG_DMA_REMAP is used to build a few helpers around the core
vmalloc code, to use them when there is a highmem page in
dma-direct, and to allow DMA-coherent allocations to use
non-contiguous page allocations in the dma-iommu layer.

Right now it needs to be explicitly selected by architectures, and
is only done so by architectures that require remapping to deal
with devices that are not DMA coherent.  Make it unconditional for
builds with CONFIG_MMU as it is very little extra code, but makes
it much more likely that large DMA allocations succeed on x86.

This fixes hot-plugging an NVMe Thunderbolt SSD for me, which tries
to allocate a 1MB buffer that is otherwise hard to obtain due to
memory fragmentation on a heavily used laptop.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent fba09099
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -47,7 +47,7 @@ config ARM
	select DMA_DECLARE_COHERENT
	select DMA_DECLARE_COHERENT
	select DMA_GLOBAL_POOL if !MMU
	select DMA_GLOBAL_POOL if !MMU
	select DMA_OPS
	select DMA_OPS
	select DMA_REMAP if MMU
	select DMA_NONCOHERENT_MMAP if MMU
	select EDAC_SUPPORT
	select EDAC_SUPPORT
	select EDAC_ATOMIC_SCRUB
	select EDAC_ATOMIC_SCRUB
	select GENERIC_ALLOCATOR
	select GENERIC_ALLOCATOR
+1 −1
Original line number Original line Diff line number Diff line
@@ -17,7 +17,7 @@ config XTENSA
	select BUILDTIME_TABLE_SORT
	select BUILDTIME_TABLE_SORT
	select CLONE_BACKWARDS
	select CLONE_BACKWARDS
	select COMMON_CLK
	select COMMON_CLK
	select DMA_REMAP if MMU
	select DMA_NONCOHERENT_MMAP if MMU
	select GENERIC_ATOMIC64
	select GENERIC_ATOMIC64
	select GENERIC_IRQ_SHOW
	select GENERIC_IRQ_SHOW
	select GENERIC_PCI_IOMAP
	select GENERIC_PCI_IOMAP
+5 −9
Original line number Original line Diff line number Diff line
@@ -852,7 +852,6 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
	return NULL;
	return NULL;
}
}


#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
		unsigned long attrs)
@@ -882,7 +881,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
	sg_free_table(&sh->sgt);
	sg_free_table(&sh->sgt);
	kfree(sh);
	kfree(sh);
}
}
#endif /* CONFIG_DMA_REMAP */


static void iommu_dma_sync_single_for_cpu(struct device *dev,
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1276,7 +1274,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;
		return;


	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
	if (is_vmalloc_addr(cpu_addr)) {
		/*
		/*
		 * If it the address is remapped, then it's either non-coherent
		 * If it the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1318,7 +1316,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
	if (!page)
	if (!page)
		return NULL;
		return NULL;


	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);


		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1350,7 +1348,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,


	gfp |= __GFP_ZERO;
	gfp |= __GFP_ZERO;


	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1391,7 +1389,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;
		return -ENXIO;


	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);
		struct page **pages = dma_common_find_pages(cpu_addr);


		if (pages)
		if (pages)
@@ -1413,7 +1411,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
	struct page *page;
	struct page *page;
	int ret;
	int ret;


	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);
		struct page **pages = dma_common_find_pages(cpu_addr);


		if (pages) {
		if (pages) {
@@ -1445,10 +1443,8 @@ static const struct dma_map_ops iommu_dma_ops = {
	.free			= iommu_dma_free,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.free_pages		= dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
#endif
	.mmap			= iommu_dma_mmap,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.map_page		= iommu_dma_map_page,
+1 −6
Original line number Original line Diff line number Diff line
@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
	select DMA_DECLARE_COHERENT
	select DMA_DECLARE_COHERENT
	bool
	bool


config DMA_REMAP
	bool
	depends on MMU
	select DMA_NONCOHERENT_MMAP

config DMA_DIRECT_REMAP
config DMA_DIRECT_REMAP
	bool
	bool
	select DMA_REMAP
	select DMA_COHERENT_POOL
	select DMA_COHERENT_POOL
	select DMA_NONCOHERENT_MMAP


config DMA_CMA
config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	bool "DMA Contiguous Memory Allocator"
+1 −1
Original line number Original line Diff line number Diff line
@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o
obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o
obj-$(CONFIG_DMA_REMAP)			+= remap.o
obj-$(CONFIG_MMU)			+= remap.o
obj-$(CONFIG_DMA_MAP_BENCHMARK)		+= map_benchmark.o
obj-$(CONFIG_DMA_MAP_BENCHMARK)		+= map_benchmark.o
Loading