Commit 82612d66 authored by Tom Murphy, committed by Will Deacon

iommu: Allow the dma-iommu api to use bounce buffers

Allow the dma-iommu API to use bounce buffers for untrusted devices.
This is a copy of the Intel bounce buffer code.

Co-developed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Tom Murphy <murphyt7@tcd.ie>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Logan Gunthorpe <logang@deltatee.com>
Link: https://lore.kernel.org/r/20201124082057.2614359-4-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 230309d0
+149 −13
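
Before reading the diff, here is a rough, driver's-eye sketch of what the change
enables. It is not part of the patch and example_map() is a made-up helper; the
functions named in its comments (dma_map_single(), iommu_dma_map_page(),
__iommu_dma_map_swiotlb(), dev_is_untrusted()) are the ones touched or added
below.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver code: nothing on the driver side changes. For an
 * untrusted PCI device (e.g. behind an external-facing port), the streaming
 * DMA path below now bounces granule-unaligned buffers through swiotlb
 * before exposing them to the device via the IOMMU.
 */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * dma_map_single() ends up in iommu_dma_map_page(), which after this
	 * patch calls __iommu_dma_map_swiotlb(): if dev_is_untrusted(dev) and
	 * the buffer is not IOVA-granule aligned, the data is copied into a
	 * swiotlb slot, the slot's padding is zeroed, and only the aligned
	 * bounce buffer is mapped in the IOMMU.
	 */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... let the device DMA from the buffer ... */

	/* Unmapping tears down the IOVA and, if bounced, the swiotlb slot. */
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}
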
@@ -20,9 +20,11 @@
 #include <linux/mm.h>
 #include <linux/mutex.h>
 #include <linux/pci.h>
+#include <linux/swiotlb.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 #include <linux/crash_dump.h>
+#include <linux/dma-direct.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -499,6 +501,31 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
 }
 
+static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	phys_addr_t phys;
+
+	phys = iommu_iova_to_phys(domain, dma_addr);
+	if (WARN_ON(!phys))
+		return;
+
+	__iommu_dma_unmap(dev, dma_addr, size);
+
+	if (unlikely(is_swiotlb_buffer(phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size,
+				iova_align(iovad, size), dir, attrs);
+}
+
+static bool dev_is_untrusted(struct device *dev)
+{
+	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
+}
+
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 		size_t size, int prot, u64 dma_mask)
 {
@@ -524,6 +551,54 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	return iova + iova_off;
 }
 
+static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
+		size_t org_size, dma_addr_t dma_mask, bool coherent,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	int prot = dma_info_to_prot(dir, coherent, attrs);
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iova_domain *iovad = &cookie->iovad;
+	size_t aligned_size = org_size;
+	void *padding_start;
+	size_t padding_size;
+	dma_addr_t iova;
+
+	/*
+	 * If both the physical buffer start address and size are
+	 * page aligned, we don't need to use a bounce page.
+	 */
+	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
+	    iova_offset(iovad, phys | org_size)) {
+		aligned_size = iova_align(iovad, org_size);
+		phys = swiotlb_tbl_map_single(dev, phys, org_size,
+					      aligned_size, dir, attrs);
+
+		if (phys == DMA_MAPPING_ERROR)
+			return DMA_MAPPING_ERROR;
+
+		/* Cleanup the padding area. */
+		padding_start = phys_to_virt(phys);
+		padding_size = aligned_size;
+
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+		    (dir == DMA_TO_DEVICE ||
+		     dir == DMA_BIDIRECTIONAL)) {
+			padding_start += org_size;
+			padding_size -= org_size;
+		}
+
+		memset(padding_start, 0, padding_size);
+	}
+
+	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
+	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
+		swiotlb_tbl_unmap_single(dev, phys, org_size,
+				aligned_size, dir, attrs);
+
+	return iova;
+}
+
 static void __iommu_dma_free_pages(struct page **pages, int count)
 {
 	while (count--)
@@ -697,11 +772,15 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_cpu(phys, size, dir);
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_cpu(phys, size, dir);
+
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
 }
 
 static void iommu_dma_sync_single_for_device(struct device *dev,
@@ -709,10 +788,14 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
 {
 	phys_addr_t phys;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
-	arch_sync_dma_for_device(phys, size, dir);
+	if (is_swiotlb_buffer(phys))
+		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);
+
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_device(phys, size, dir);
 }
 
@@ -723,11 +806,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
+
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_CPU);
+	}
 }
 
 static void iommu_dma_sync_sg_for_device(struct device *dev,
@@ -737,12 +826,18 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 	struct scatterlist *sg;
 	int i;
 
-	if (dev_is_dma_coherent(dev))
+	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
 		return;
 
-	for_each_sg(sgl, sg, nelems, i)
-		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	for_each_sg(sgl, sg, nelems, i) {
+		if (is_swiotlb_buffer(sg_phys(sg)))
+			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+						dir, SYNC_FOR_DEVICE);
+
+		if (!dev_is_dma_coherent(dev))
+			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
+	}
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
@@ -750,10 +845,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
-	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
+	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
+			coherent, dir, attrs);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(phys, size, dir);
@@ -765,7 +860,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap(dev, dma_handle, size);
+	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
 }
 
 /*
@@ -843,6 +938,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
 	}
 }
 
+static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
+				sg_dma_len(s), dir, attrs);
+}
+
+static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i) {
+		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
+				s->length, dma_get_mask(dev),
+				dev_is_dma_coherent(dev), dir, attrs);
+		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
+			goto out_unmap;
+		sg_dma_len(s) = s->length;
+	}
+
+	return nents;
+
+out_unmap:
+	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
+	return 0;
+}
+
 /*
  * The DMA API client is passing in a scatterlist which could describe
  * any old buffer layout, but the IOMMU API requires everything to be
@@ -869,6 +997,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev))
+		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
+
 	/*
 	 * Work out how much IOVA space we need, and align the segments to
 	 * IOVA granules for the IOMMU driver to handle. With some clever
@@ -938,6 +1069,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 
+	if (dev_is_untrusted(dev)) {
+		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
+		return;
+	}
+
 	/*
 	 * The scatterlist segments are mapped into a single
 	 * contiguous IOVA allocation, so this is incredibly easy.