Commit 4136ce90 authored by Robin Murphy, committed by Christoph Hellwig
Browse files

ARM/dma-mapping: merge IOMMU ops



The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. Some minor tweaks to make those
safe for coherent devices with minimal overhead, and we can condense
down to a single set of DMA ops.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Marc Zyngier <maz@kernel.org>
parent d563bccf
Loading
Loading
Loading
Loading
+13 −24
Original line number Diff line number Diff line
@@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

@@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
@@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

@@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

@@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
	.unmap_resource		= arm_iommu_unmap_resource,
};

static const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_iommu_map_page,
	.unmap_page	= arm_iommu_unmap_page,

	.map_sg		= arm_iommu_map_sg,
	.unmap_sg	= arm_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
@@ -1750,9 +1742,6 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
		return;
	}

	if (coherent)
		set_dma_ops(dev, &iommu_coherent_ops);
	else
	set_dma_ops(dev, &iommu_ops);
}