drivers/iommu/dma-iommu.c +26 −1

@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 
 struct iommu_dma_msi_page {
 	struct list_head		list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	return iova_reserve_iommu_regions(dev, domain);
 }
 
+static int iommu_dma_deferred_attach(struct device *dev,
+		struct iommu_domain *domain)
+{
+	const struct iommu_ops *ops = domain->ops;
+
+	if (!is_kdump_kernel())
+		return 0;
+
+	if (unlikely(ops->is_attach_deferred &&
+			ops->is_attach_deferred(domain, dev)))
+		return iommu_attach_device(domain, dev);
+
+	return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -470,6 +486,9 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return DMA_MAPPING_ERROR;
+
 	size = iova_align(iovad, size + iova_off);
 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
@@ -579,6 +598,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return NULL;
+
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
 		min_size = PAGE_SIZE;
@@ -821,6 +843,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return 0;
+
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
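What the patch does: in a kdump (crash-capture) kernel, a device whose IOMMU attach was deferred is not attached to its DMA domain at probe time, because it may still have in-flight DMA set up by the crashed kernel. The new iommu_dma_deferred_attach() helper performs that attach lazily on the device's first DMA API call (__iommu_dma_map, iommu_dma_alloc_remap, iommu_dma_map_sg); outside a kdump kernel it returns immediately, and the unlikely() annotations keep the deferred path off the hot path.

The driver side that the helper consults is not part of this diff. Below is a minimal, hypothetical sketch of how an IOMMU driver could implement the ->is_attach_deferred() callback, loosely modeled on the AMD IOMMU driver; the my_iommu_* names, the defer_attach flag, and the my_iommu_get_dev_data() lookup are assumptions for illustration, not code from this patch.

/*
 * Hypothetical driver-side sketch: the driver marks a device as
 * deferred when probing in a kdump kernel and reports that state
 * through ->is_attach_deferred(), so iommu_dma_deferred_attach()
 * knows to attach the device on its first DMA mapping instead.
 */
struct my_iommu_dev_data {
	bool defer_attach;	/* set at probe time in a kdump kernel */
};

static bool my_iommu_is_attach_deferred(struct iommu_domain *domain,
					struct device *dev)
{
	/* my_iommu_get_dev_data() stands in for the driver's per-device lookup */
	struct my_iommu_dev_data *dev_data = my_iommu_get_dev_data(dev);

	return dev_data->defer_attach;
}

static const struct iommu_ops my_iommu_ops = {
	/* ... other callbacks ... */
	.is_attach_deferred	= my_iommu_is_attach_deferred,
};

Design note: gating on is_kdump_kernel() first means a normal boot pays only a single predictable branch per mapping call, and drivers that never defer (no ->is_attach_deferred callback) are skipped by the NULL check before the callback is invoked.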