Commit f02ad36d authored by Logan Gunthorpe, committed by Christoph Hellwig
Browse files

dma-direct: support PCI P2PDMA pages in dma-direct map_sg



Add PCI P2PDMA support for dma_direct_map_sg() so that it can map
PCI P2PDMA pages directly without a hack in the callers. This allows
for heterogeneous SGLs that contain both P2PDMA and regular pages.

A P2PDMA page may have three possible outcomes when being mapped:
  1) If the data path between the two devices doesn't go through the
     root port, then it should be mapped with a PCI bus address
  2) If the data path goes through the host bridge, it should be mapped
     normally, as though it were a CPU physical address
  3) It is not possible for the two devices to communicate and thus
     the mapping operation should fail (and it will return -EREMOTEIO).

SGL segments that contain PCI bus addresses are marked with
sg_dma_mark_pci_p2pdma() and are ignored when unmapped.

P2PDMA mappings are also failed if swiotlb needs to be used on the
mapping.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 7c2645a2
Loading
Loading
Loading
Loading
+37 −6
Original line number Diff line number Diff line
@@ -454,29 +454,60 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 *
 * @dev:   device the segments were mapped for
 * @sgl:   scatterlist previously mapped by dma_direct_map_sg()
 * @nents: number of entries to unmap
 * @dir:   DMA direction used at map time
 * @attrs: DMA attributes (e.g. DMA_ATTR_SKIP_CPU_SYNC)
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg))
			/* PCI bus address: nothing was mapped, just clear the mark */
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

@@ -484,7 +515,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+7 −1
Original line number Diff line number Diff line
@@ -8,6 +8,7 @@
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>
#include <linux/memremap.h>

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (is_swiotlb_force_bounce(dev))
	if (is_swiotlb_force_bounce(dev)) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		return swiotlb_map(dev, phys, size, dir, attrs);
	}

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (is_pci_p2pdma_page(page))
			return DMA_MAPPING_ERROR;
		if (is_swiotlb_active(dev))
			return swiotlb_map(dev, phys, size, dir, attrs);