include/linux/dma-direct.h (+2 −1):

@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return false;
 
-	return addr + size - 1 <= *dev->dma_mask;
+	return addr + size - 1 <=
+		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

kernel/dma/direct.c (+11 −10):

@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		return false;
 	}
 
-	if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+	if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 		dev_err(dev,
-			"%s: overflow %pad+%zu of device mask %llx\n",
-			caller, &dma_addr, size, *dev->dma_mask);
+			"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+			caller, &dma_addr, size, *dev->dma_mask,
+			dev->bus_dma_mask);
 	}
 	return false;
 }

@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else

@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }

@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }
include/linux/dma-direct.h (+2 −1):

@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return false;
 
-	return addr + size - 1 <= *dev->dma_mask;
+	return addr + size - 1 <=
+		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
kernel/dma/direct.c (+11 −10):

@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		return false;
 	}
 
-	if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+	if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 		dev_err(dev,
-			"%s: overflow %pad+%zu of device mask %llx\n",
-			caller, &dma_addr, size, *dev->dma_mask);
+			"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+			caller, &dma_addr, size, *dev->dma_mask,
+			dev->bus_dma_mask);
 	}
 	return false;
 }

@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else

@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }

@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }