Commit 4a3bb420 authored by Linus Torvalds

Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix debugfs initialization order (Anthony Iliopoulos)

 - use memory_intersects() directly (Kefeng Wang)

 - allow ->map_sg implementations to return specific errors (Logan
   Gunthorpe, Martin Oliveira); see the consumer-side sketch below

 - turn the dma_map_sg return value into an unsigned int (me)

 - provide a common global coherent pool implementation (me)
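
A note on the ->map_sg change, since it is the user-visible core of this
series: implementations now return a negative errno on failure instead of
zero, dma_map_sg() keeps its old 0-on-error contract for existing callers,
and dma_map_sgtable() passes the specific error through. A minimal
consumer-side sketch (the demo_* naming is invented for illustration):

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: map an sg_table for device-to-memory
 * transfers and propagate the specific error code that the reworked
 * ->map_sg contract makes available.
 */
static int demo_map_buffer(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* 0 on success, or a negative errno such as -EIO or -ENOMEM */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	/* ... program the hardware with sgt->sgl, and eventually
	 * release the mapping with dma_unmap_sgtable() ...
	 */
	return 0;
}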

* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
  hexagon: use the generic global coherent pool
  dma-mapping: make the global coherent pool conditional
  dma-mapping: add a dma_init_global_coherent helper
  dma-mapping: simplify dma_init_coherent_memory
  dma-mapping: allow using the global coherent pool for !ARM
  ARM/nommu: use the generic dma-direct code for non-coherent devices
  dma-direct: add support for dma_coherent_default_memory
  dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
  dma-mapping: disallow .map_sg operations from returning zero on error
  dma-mapping: return error code from dma_dummy_map_sg()
  x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
  x86/amd_gart: return error code from gart_map_sg()
  xen: swiotlb: return error code from xen_swiotlb_map_sg()
  parisc: return error code from .map_sg() ops
  sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  sparc/iommu: return error codes from .map_sg() ops
  s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
  s390/pci: return error code from s390_dma_map_sg()
  powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
  powerpc/iommu: return error code from .map_sg() ops
  ...
parents eceae1e7 c1dec343
arch/alpha/kernel/pci_iommu.c +7 −3
@@ -649,7 +649,9 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 		sg->dma_address
 		  = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
 				     sg->length, dac_allowed);
-		return sg->dma_address != DMA_MAPPING_ERROR;
+		if (sg->dma_address == DMA_MAPPING_ERROR)
+			return -EIO;
+		return 1;
 	}
 
 	start = sg;
@@ -685,8 +687,10 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	if (out < end)
 		out->dma_length = 0;
 
-	if (out - start == 0)
+	if (out - start == 0) {
 		printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+		return -ENOMEM;
+	}
 	DBGA("pci_map_sg: %ld entries\n", out - start);
 
 	return out - start;
@@ -699,7 +703,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 	   entries.  Unmap them now.  */
 	if (out > start)
 		pci_unmap_sg(pdev, start, out - start, dir);
-	return 0;
+	return -ENOMEM;
 }
 
 /* Unmap a set of streaming mode DMA translations.  Again, cpu read
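
The alpha conversion above shows the pattern repeated across the series: on
failure, a .map_sg op unwinds whatever it already mapped and returns a
specific negative errno rather than 0. A condensed sketch of the new
convention for a hypothetical dma_map_ops (example_map_one() and
example_unmap_one() are invented stand-ins for an implementation's
per-entry mapping helpers):

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, j;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = example_map_one(dev, sg_phys(sg),
						  sg->length, dir);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto unwind;
		sg_dma_len(sg) = sg->length;
	}
	return nents;	/* success is a positive entry count, never 0 */

unwind:
	/* Undo the i entries mapped so far, then say why we failed;
	 * returning 0 on error is no longer part of the contract.
	 */
	for_each_sg(sgl, sg, i, j)
		example_unmap_one(dev, sg_dma_address(sg), sg_dma_len(sg));
	return -EIO;
}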
arch/arm/Kconfig +3 −2
@@ -18,8 +18,8 @@ config ARM
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
-	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU
 	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -44,6 +44,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_GLOBAL_POOL if !MMU
 	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
arch/arm/mm/dma-mapping-nommu.c +6 −167
@@ -5,12 +5,7 @@
  *  Copyright (C) 2000-2004 Russell King
  */
 
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
 #include <linux/dma-map-ops.h>
-#include <linux/scatterlist.h>
-
 #include <asm/cachetype.h>
 #include <asm/cacheflush.h>
 #include <asm/outercache.h>
@@ -18,64 +13,7 @@
 
 #include "dma.h"
 
-/*
- *  The generic direct mapping code is used if
- *   - MMU/MPU is off
- *   - cpu is v7m w/o cache support
- *   - device is coherent
- *  otherwise arm_nommu_dma_ops is used.
- *
- *  arm_nommu_dma_ops rely on consistent DMA memory (please, refer to
- *  [1] on how to declare such memory).
- *
- *  [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
- */
-
-static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp,
-				 unsigned long attrs)
-
-{
-	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
-
-	/*
-	 * dma_alloc_from_global_coherent() may fail because:
-	 *
-	 * - no consistent DMA region has been defined, so we can't
-	 *   continue.
-	 * - there is no space left in consistent DMA region, so we
-	 *   only can fallback to generic allocator if we are
-	 *   advertised that consistency is not required.
-	 */
-
-	WARN_ON_ONCE(ret == NULL);
-	return ret;
-}
-
-static void arm_nommu_dma_free(struct device *dev, size_t size,
-			       void *cpu_addr, dma_addr_t dma_addr,
-			       unsigned long attrs)
-{
-	int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);
-
-	WARN_ON_ONCE(ret == 0);
-}
-
-static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
-			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			      unsigned long attrs)
-{
-	int ret;
-
-	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
-		return ret;
-	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-	return -ENXIO;
-}
-
-
-static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	dmac_map_area(__va(paddr), size, dir);
@@ -86,7 +24,7 @@ static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
 		outer_clean_range(paddr, paddr + size);
 }
 
-static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
-				  enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+		enum dma_data_direction dir)
 {
 	if (dir != DMA_TO_DEVICE) {
@@ -95,102 +33,6 @@ static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
 	}
 }
 
-static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
-					 unsigned long offset, size_t size,
-					 enum dma_data_direction dir,
-					 unsigned long attrs)
-{
-	dma_addr_t handle = page_to_phys(page) + offset;
-
-	__dma_page_cpu_to_dev(handle, size, dir);
-
-	return handle;
-}
-
-static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				     size_t size, enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	__dma_page_dev_to_cpu(handle, size, dir);
-}
-
-
-static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
-				int nents, enum dma_data_direction dir,
-				unsigned long attrs)
-{
-	int i;
-	struct scatterlist *sg;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg_dma_address(sg) = sg_phys(sg);
-		sg_dma_len(sg) = sg->length;
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-	}
-
-	return nents;
-}
-
-static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-				   int nents, enum dma_data_direction dir,
-				   unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(handle, size, dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
-					     int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
-					  int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
-}
-
-const struct dma_map_ops arm_nommu_dma_ops = {
-	.alloc			= arm_nommu_dma_alloc,
-	.free			= arm_nommu_dma_free,
-	.alloc_pages		= dma_direct_alloc_pages,
-	.free_pages		= dma_direct_free_pages,
-	.mmap			= arm_nommu_dma_mmap,
-	.map_page		= arm_nommu_dma_map_page,
-	.unmap_page		= arm_nommu_dma_unmap_page,
-	.map_sg			= arm_nommu_dma_map_sg,
-	.unmap_sg		= arm_nommu_dma_unmap_sg,
-	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
-	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
-	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
-	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
-};
-EXPORT_SYMBOL(arm_nommu_dma_ops);
-
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -201,14 +43,11 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		 * enough to check if MPU is in use or not since in absense of
 		 * MPU system memory map is used.
 		 */
-		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
+		dev->dma_coherent = cacheid ? coherent : true;
 	} else {
 		/*
 		 * Assume coherent DMA in case MMU/MPU has not been set up.
		 */
-		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
+		dev->dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-
-	if (!dev->archdata.dma_coherent)
-		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
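
With arm_nommu_dma_ops gone, non-coherent nommu devices go through the
generic dma-direct code, which only needs the two arch_sync_dma_* hooks
this file still provides. Very roughly (a simplified sketch, not the real
kernel/dma/direct.c, which also handles swiotlb, bus offsets and attrs):

/* Simplified view of a dma-direct streaming map on !MMU ARM: the DMA
 * address is the physical address, plus a cache maintenance call via
 * the arch hook when the device is not coherent.
 */
static dma_addr_t sketch_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
	return paddr;
}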
arch/arm/mm/dma-mapping.c +16 −10
@@ -980,7 +980,7 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct scatterlist *s;
-	int i, j;
+	int i, j, ret;
 
 	for_each_sg(sg, s, nents, i) {
 #ifdef CONFIG_NEED_SG_DMA_LENGTH
@@ -988,15 +988,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 #endif
 		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
 						s->length, dir, attrs);
-		if (dma_mapping_error(dev, s->dma_address))
+		if (dma_mapping_error(dev, s->dma_address)) {
+			ret = -EIO;
 			goto bad_mapping;
+		}
 	}
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
 		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
-	return 0;
+	return ret;
 }
 
 /**
@@ -1622,7 +1624,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		     bool is_coherent)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
-	int i, count = 0;
+	int i, count = 0, ret;
 	unsigned int offset = s->offset;
 	unsigned int size = s->offset + s->length;
 	unsigned int max = dma_get_max_seg_size(dev);
@@ -1630,12 +1632,13 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for (i = 1; i < nents; i++) {
 		s = sg_next(s);
 
-		s->dma_address = DMA_MAPPING_ERROR;
 		s->dma_length = 0;
 
 		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
-			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
-			    dir, attrs, is_coherent) < 0)
+			ret = __map_sg_chunk(dev, start, size,
+					     &dma->dma_address, dir, attrs,
+					     is_coherent);
+			if (ret < 0)
 				goto bad_mapping;
 
 			dma->dma_address += offset;
@@ -1648,8 +1651,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent) < 0)
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+			     is_coherent);
+	if (ret < 0)
 		goto bad_mapping;
 
 	dma->dma_address += offset;
@@ -1660,7 +1664,9 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 bad_mapping:
 	for_each_sg(sg, s, count, i)
 		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
-	return 0;
+	if (ret == -ENOMEM)
+		return ret;
+	return -EINVAL;
 }
 
 /**
arch/hexagon/Kconfig +1 −0
@@ -7,6 +7,7 @@ config HEXAGON
 	select ARCH_32BIT_OFF_T
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_NO_PREEMPT
+	select DMA_GLOBAL_POOL
 	# Other pending projects/to-do items.
 	# select HAVE_REGS_AND_STACK_ACCESS_API
 	# select HAVE_HW_BREAKPOINT if PERF_EVENTS
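
DMA_GLOBAL_POOL is only the Kconfig side of the common implementation; the
architecture still feeds the pool memory once at boot. A sketch of that
wiring using the dma_init_global_coherent() helper added by this series
(the base address here is made up; hexagon's real version derives the
range from its memory map):

#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/sizes.h>

/* Register a range of uncached memory as the global coherent pool that
 * backs dma_alloc_coherent() for non-coherent devices.
 */
static int __init example_dma_pool_init(void)
{
	return dma_init_global_coherent(0x08000000, SZ_1M);
}
core_initcall(example_dma_pool_init);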