arch/powerpc/include/asm/dma-mapping.h  +4 −4

@@ -19,13 +19,13 @@
 #include <asm/swiotlb.h>

 /* Some dma direct funcs must be visible for use in other dma_ops */
-extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t flag,
					 unsigned long attrs);
-extern void __dma_direct_free_coherent(struct device *dev, size_t size,
+extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_handle,
				       unsigned long attrs);
-extern int dma_direct_mmap_coherent(struct device *dev,
+extern int dma_nommu_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, unsigned long attrs);

@@ -73,7 +73,7 @@ static inline unsigned long device_to_mask(struct device *dev)
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;

 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
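A note on why the three __dma_nommu_* helpers stay extern: other ops tables borrow them for coherent-memory duties, as the dma-iommu.c and dma-swiotlb.c hunks below show. A minimal sketch of that composition pattern, assuming the post-rename declarations above (the foo_* names are hypothetical, not part of this patch):

#include <linux/dma-mapping.h>
#include <asm/dma-mapping.h>	/* __dma_nommu_* declarations above */

/* Hypothetical bus-specific streaming map callback (stub). */
static dma_addr_t foo_map_page(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	return page_to_phys(page) + offset;
}

/* Coherent alloc/free/mmap come straight from the exported nommu
 * helpers; only the streaming callbacks are bus-specific.
 */
static const struct dma_map_ops foo_dma_ops = {
	.alloc = __dma_nommu_alloc_coherent,
	.free = __dma_nommu_free_coherent,
	.mmap = dma_nommu_mmap_coherent,
	.map_page = foo_map_page,
};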
arch/powerpc/kernel/dma-iommu.c  +1 −1

@@ -114,7 +114,7 @@ int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
 struct dma_map_ops dma_iommu_ops = {
	.alloc = dma_iommu_alloc_coherent,
	.free = dma_iommu_free_coherent,
-	.mmap = dma_direct_mmap_coherent,
+	.mmap = dma_nommu_mmap_coherent,
	.map_sg = dma_iommu_map_sg,
	.unmap_sg = dma_iommu_unmap_sg,
	.dma_supported = dma_iommu_dma_supported,
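dma_iommu_ops reuses the renamed dma_nommu_mmap_coherent here because mapping a coherent buffer into userspace does not depend on how the device addresses it. From a driver's point of view nothing changes: a typical consumer goes through dma_mmap_coherent(), which dispatches to whatever .mmap hook the device's ops table provides. A sketch, with hypothetical foo_* names:

#include <linux/dma-mapping.h>
#include <linux/fs.h>

/* Hypothetical driver state: buf was obtained earlier with
 * dma_alloc_coherent(dev, buf_size, &buf_dma, GFP_KERNEL).
 */
struct foo_priv {
	struct device *dev;
	void *buf;
	dma_addr_t buf_dma;
	size_t buf_size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_priv *p = file->private_data;

	/* Ends up in the ops table's .mmap hook --
	 * dma_nommu_mmap_coherent() for both tables in this patch.
	 */
	return dma_mmap_coherent(p->dev, vma, p->buf, p->buf_dma,
				 p->buf_size);
}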
arch/powerpc/kernel/dma-swiotlb.c  +3 −3

@@ -47,9 +47,9 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
  * for everything else.
  */
 const struct dma_map_ops swiotlb_dma_ops = {
-	.alloc = __dma_direct_alloc_coherent,
-	.free = __dma_direct_free_coherent,
-	.mmap = dma_direct_mmap_coherent,
+	.alloc = __dma_nommu_alloc_coherent,
+	.free = __dma_nommu_free_coherent,
+	.mmap = dma_nommu_mmap_coherent,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.dma_supported = swiotlb_dma_supported,
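swiotlb_dma_ops is the same composition trick: the nommu coherent helpers paired with swiotlb's bounce-buffering scatter/gather routines. Callers never touch these tables directly; roughly how the generic API of this kernel generation dispatched a single mapping, as a simplified model of dma_map_single_attrs() from include/linux/dma-mapping.h (error handling omitted, so a sketch rather than the real implementation):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static inline dma_addr_t model_dma_map_single(struct device *dev,
					      void *ptr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* On a default powerpc PCI device this reaches
	 * dma_nommu_map_page(); with swiotlb or iommu ops installed,
	 * their map_page variant runs instead.
	 */
	return ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size, dir, 0);
}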
arch/powerpc/kernel/dma.c  +34 −34

@@ -40,7 +40,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
	return pfn;
 }

-static int dma_direct_dma_supported(struct device *dev, u64 mask)
+static int dma_nommu_dma_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_PPC64
	u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

@@ -62,7 +62,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
 #endif
 }

-void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flag,
				  unsigned long attrs)
 {

@@ -119,7 +119,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
 #endif
 }

-void __dma_direct_free_coherent(struct device *dev, size_t size,
+void __dma_nommu_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
 {

@@ -130,7 +130,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
 #endif
 }

-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       unsigned long attrs)
 {

@@ -139,8 +139,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
	/* The coherent mask may be smaller than the real mask, check if
	 * we can really use the direct ops
	 */
-	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
-		return __dma_direct_alloc_coherent(dev, size, dma_handle,
+	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_nommu_alloc_coherent(dev, size, dma_handle,
						   flag, attrs);

	/* Ok we can't ... do we have an iommu ? If not, fail */

@@ -154,15 +154,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
					dev_to_node(dev));
 }

-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     unsigned long attrs)
 {
	struct iommu_table *iommu;

-	/* See comments in dma_direct_alloc_coherent() */
-	if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
-		return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
+	/* See comments in dma_nommu_alloc_coherent() */
+	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
+		return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
						  attrs);
	/* Maybe we used an iommu ... */
	iommu = get_iommu_table_base(dev);

@@ -175,7 +175,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
	iommu_free_coherent(iommu, size, vaddr, dma_handle);
 }

-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t handle, size_t size,
			    unsigned long attrs)
 {

@@ -193,7 +193,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			       vma->vm_page_prot);
 }

-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
 {

@@ -213,13 +213,13 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
	return nents;
 }

-static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				unsigned long attrs)
 {
 }

-static u64 dma_direct_get_required_mask(struct device *dev)
+static u64 dma_nommu_get_required_mask(struct device *dev)
 {
	u64 end, mask;

@@ -231,7 +231,7 @@ static u64 dma_direct_get_required_mask(struct device *dev)
	return mask;
 }

-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,

@@ -246,7 +246,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
	return page_to_phys(page) + offset + get_dma_offset(dev);
 }

-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,

@@ -255,7 +255,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
 }

 #ifdef CONFIG_NOT_COHERENT_CACHE
-static inline void dma_direct_sync_sg(struct device *dev,
+static inline void dma_nommu_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
 {

@@ -266,7 +266,7 @@ static inline void dma_direct_sync_sg(struct device *dev,
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }

-static inline void dma_direct_sync_single(struct device *dev,
+static inline void dma_nommu_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
 {

@@ -274,24 +274,24 @@ static inline void dma_direct_sync_single(struct device *dev,
 }
 #endif

-const struct dma_map_ops dma_direct_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
-	.mmap = dma_direct_mmap_coherent,
-	.map_sg = dma_direct_map_sg,
-	.unmap_sg = dma_direct_unmap_sg,
-	.dma_supported = dma_direct_dma_supported,
-	.map_page = dma_direct_map_page,
-	.unmap_page = dma_direct_unmap_page,
-	.get_required_mask = dma_direct_get_required_mask,
+const struct dma_map_ops dma_nommu_ops = {
+	.alloc = dma_nommu_alloc_coherent,
+	.free = dma_nommu_free_coherent,
+	.mmap = dma_nommu_mmap_coherent,
+	.map_sg = dma_nommu_map_sg,
+	.unmap_sg = dma_nommu_unmap_sg,
+	.dma_supported = dma_nommu_dma_supported,
+	.map_page = dma_nommu_map_page,
+	.unmap_page = dma_nommu_unmap_page,
+	.get_required_mask = dma_nommu_get_required_mask,
 #ifdef CONFIG_NOT_COHERENT_CACHE
-	.sync_single_for_cpu = dma_direct_sync_single,
-	.sync_single_for_device = dma_direct_sync_single,
-	.sync_sg_for_cpu = dma_direct_sync_sg,
-	.sync_sg_for_device = dma_direct_sync_sg,
+	.sync_single_for_cpu = dma_nommu_sync_single,
+	.sync_single_for_device = dma_nommu_sync_single,
+	.sync_sg_for_cpu = dma_nommu_sync_sg,
+	.sync_sg_for_device = dma_nommu_sync_sg,
 #endif
 };
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);

 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {

@@ -302,7 +302,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
	 * is no dma_op->set_coherent_mask() so we have to do
	 * things the hard way:
	 */
-	if (get_dma_ops(dev) != &dma_direct_ops ||
+	if (get_dma_ops(dev) != &dma_nommu_ops ||
	    get_iommu_table_base(dev) == NULL ||
	    !dma_iommu_dma_supported(dev, mask))
		return -EIO;
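The nommu scheme itself is untouched by the rename: a DMA address is just the CPU physical address plus a constant per-device offset, and dma_nommu_dma_supported() only asks whether the device's mask covers the last byte of RAM after that offset. A standalone toy model of the arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_offset  = 0x80000000ull;	/* hypothetical get_dma_offset() */
	uint64_t end_of_dram = 0x100000000ull;	/* pretend 4 GiB of RAM */
	uint64_t page_phys   = 0x12345000ull;	/* page_to_phys(page) */
	uint64_t mask        = 0xffffffffull;	/* 32-bit-capable device */

	/* dma_nommu_map_page(): phys + offset, no translation */
	printf("dma addr  = %#llx\n",
	       (unsigned long long)(page_phys + dma_offset));

	/* dma_nommu_dma_supported(): mask must reach offset + last byte;
	 * here 0xffffffff < 0x17fffffff, so a 32-bit mask fails.
	 */
	printf("supported = %d\n",
	       mask >= dma_offset + (end_of_dram - 1));
	return 0;
}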
arch/powerpc/kernel/pci-common.c  +1 −1

@@ -60,7 +60,7 @@
 resource_size_t isa_mem_base;
 EXPORT_SYMBOL(isa_mem_base);

-static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;

 void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
 {
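pci-common.c only swaps the default: every PCI device starts out on dma_nommu_ops, and a platform that translates PCI DMA through an IOMMU can override it with the setter shown above. An illustrative hook, not taken from this patch (the function name is made up; set_pci_dma_ops() and dma_iommu_ops are the symbols from the hunks above):

/* Hypothetical platform setup replacing the nommu default early in boot. */
static void __init foo_platform_setup_pci(void)
{
	set_pci_dma_ops(&dma_iommu_ops);
}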