Commit f9774cfd authored by Christoph Hellwig

ARM/dma-mapping: use dma_to_phys/phys_to_dma in the dma-mapping code

Use the helpers as expected by the dma-direct code in the old arm
dma-mapping code to ease a gradual switch to the common DMA code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Tested-by: Marc Zyngier <maz@kernel.org>
parent d6e2e925
+12 −12
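Before the diff, a brief note on why the two addressing styles agree. The sketch below is a minimal user-space model, not the kernel code: the fixed bus_offset, the model_* helper names, and all values are assumptions for illustration only. The real phys_to_dma()/dma_to_phys() live in include/linux/dma-direct.h and derive the translation from dev->dma_range_map; the old ARM pfn_to_dma()/dma_to_pfn() did the equivalent at page-frame granularity.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

#define PAGE_SHIFT	12
#define PFN_PHYS(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
#define PHYS_PFN(addr)	((unsigned long)((addr) >> PAGE_SHIFT))

/* Assumed constant CPU-to-bus offset; the kernel reads this from
 * dev->dma_range_map instead. */
static const uint64_t bus_offset = 0x40000000;

/* Simplified model of the generic helper: translates a full address. */
static dma_addr_t model_phys_to_dma(phys_addr_t paddr)
{
	return paddr + bus_offset;
}

/* Simplified model of the old ARM helper: translates a page frame. */
static dma_addr_t model_pfn_to_dma(unsigned long pfn)
{
	return PFN_PHYS(pfn) + bus_offset;
}

int main(void)
{
	phys_addr_t page_phys = 0x80000000ULL;	/* page-aligned address */
	unsigned long offset = 0x123;		/* sub-page offset < PAGE_SIZE */

	/* Old style: translate the PFN, then add the sub-page offset. */
	dma_addr_t old = model_pfn_to_dma(PHYS_PFN(page_phys)) + offset;
	/* New style: add the offset first, translate the whole address. */
	dma_addr_t new = model_phys_to_dma(page_phys + offset);

	/* Equal because the whole page lives in one translation range,
	 * so the sub-page offset commutes with the translation. */
	assert(old == new);
	printf("bus address: %#llx\n", (unsigned long long)new);
	return 0;
}

This is exactly the shape of the arm_dma_map_page() changes below: moving the "+ offset" inside the conversion does not change any resulting bus address.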
@@ -128,14 +128,14 @@ static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return phys_to_dma(dev, page_to_phys(page) + offset);
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+	return phys_to_dma(dev, page_to_phys(page) + offset);
}

/**
@@ -156,7 +156,7 @@ static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+		__dma_page_dev_to_cpu(phys_to_page(dma_to_phys(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

@@ -164,7 +164,7 @@ static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

@@ -172,7 +172,7 @@ static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

@@ -190,7 +190,7 @@ static int arm_dma_supported(struct device *dev, u64 mask)
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
-	return dma_to_pfn(dev, mask) >= max_dma_pfn;
+	return PHYS_PFN(dma_to_phys(dev, mask)) >= max_dma_pfn;
}

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
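On the arm_dma_supported() hunk above: dma_to_pfn(dev, mask) and PHYS_PFN(dma_to_phys(dev, mask)) both shift the translated bus address right by PAGE_SHIFT, so the inclusive PFN limit the comment describes is preserved. A small sketch of the check, with an identity translation and the max_dma_pfn value assumed purely for illustration:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PHYS_PFN(addr)	((unsigned long)((addr) >> PAGE_SHIFT))

int main(void)
{
	uint64_t mask = 0xffffffffULL;	/* 32-bit DMA mask (inclusive) */
	uint64_t phys = mask;		/* assume dma_to_phys() is identity */
	unsigned long max_dma_pfn = 0xfffff;	/* assumed last DMA-able PFN */

	/* PHYS_PFN rounds down; since the mask is an inclusive limit, the
	 * result names the last page frame the device can fully address. */
	assert(PHYS_PFN(phys) == 0xfffffUL);
	return PHYS_PFN(phys) >= max_dma_pfn ? 0 : 1;	/* 0 == supported */
}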
@@ -681,7 +681,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
	if (page) {
		unsigned long flags;

-		*handle = pfn_to_dma(dev, page_to_pfn(page));
+		*handle = phys_to_dma(dev, page_to_phys(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
@@ -721,7 +721,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
@@ -762,7 +762,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
@@ -796,15 +796,15 @@ static int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
-	unsigned long pfn = dma_to_pfn(dev, handle);
+	phys_addr_t paddr = dma_to_phys(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
-	if (!pfn_valid(pfn))
+	if (!pfn_valid(PHYS_PFN(paddr)))
		return -ENXIO;

-	page = pfn_to_page(pfn);
+	page = phys_to_page(paddr);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
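The arm_dma_get_sgtable() hunk keeps the existing guard: the translated address must be backed by a struct page before it can be wrapped in an sg_table. A minimal model of that pattern follows; the model_pfn_valid() helper and the RAM window it checks are assumptions standing in for the kernel's pfn_valid():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_PFN(addr)	((unsigned long)((addr) >> PAGE_SHIFT))

/* Assumed RAM window: in this model only PFNs inside it are backed
 * by a struct page, which is what the kernel's pfn_valid() reports. */
static bool model_pfn_valid(unsigned long pfn)
{
	return pfn >= 0x80000 && pfn < 0xc0000;
}

int main(void)
{
	uint64_t paddr = 0x80001000ULL;	/* result of a dma_to_phys() call */

	/* Same shape as the new check: validate the page frame before
	 * converting the physical address to a page. */
	if (!model_pfn_valid(PHYS_PFN(paddr))) {
		fprintf(stderr, "no struct page: would return -ENXIO\n");
		return 1;
	}
	printf("pfn %#lx is valid; safe to build the sg_table\n",
	       PHYS_PFN(paddr));
	return 0;
}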