Commit 40ac971e authored by Roman Skakun, committed by Christoph Hellwig

dma-mapping: handle vmalloc addresses in dma_common_{mmap,get_sgtable}



xen-swiotlb can use vmalloc-backed addresses for DMA coherent allocations
and uses the common helpers.  Properly handle them to unbreak Xen on
ARM platforms.

Fixes: 1b65c4e5 ("swiotlb-xen: use xen_alloc/free_coherent_pages")
Signed-off-by: Roman Skakun <roman_skakun@epam.com>
Reviewed-by: Andrii Anisov <andrii_anisov@epam.com>
[hch: split the patch, renamed the helpers]
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent d936eb23
@@ -5,6 +5,13 @@
  */
 #include <linux/dma-map-ops.h>
 
+static struct page *dma_common_vaddr_to_page(void *cpu_addr)
+{
+	if (is_vmalloc_addr(cpu_addr))
+		return vmalloc_to_page(cpu_addr);
+	return virt_to_page(cpu_addr);
+}
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -12,7 +19,7 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		 unsigned long attrs)
 {
-	struct page *page = virt_to_page(cpu_addr);
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret;
 
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -32,6 +39,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
+	struct page *page = dma_common_vaddr_to_page(cpu_addr);
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
@@ -43,7 +51,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 		return -ENXIO;
 
 	return remap_pfn_range(vma, vma->vm_start,
-			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
+			page_to_pfn(page) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
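
Background on why the helper is needed: virt_to_page() is only valid for addresses in the kernel's linear (direct) mapping, while xen-swiotlb may hand back vmalloc-area addresses, whose struct page has to be looked up through the page tables with vmalloc_to_page(). A minimal standalone sketch of that dispatch follows; the function name vaddr_to_page_sketch and the includes are illustrative only, the patch's real helper is dma_common_vaddr_to_page() above.

#include <linux/mm.h>		/* virt_to_page(), is_vmalloc_addr() */
#include <linux/vmalloc.h>	/* vmalloc_to_page() */

/* Sketch: pick the right address-to-page translation for a CPU address. */
static struct page *vaddr_to_page_sketch(void *cpu_addr)
{
	/*
	 * vmalloc mappings live outside the linear map, so virt_to_page()
	 * is not valid for them; look the page up via the page tables.
	 */
	if (is_vmalloc_addr(cpu_addr))
		return vmalloc_to_page(cpu_addr);

	/* Linear-map addresses can use plain virt_to_page(). */
	return virt_to_page(cpu_addr);
}

In the patch both callers obtain the struct page once via the new helper and then work with it (for example page_to_pfn(page) in the mmap path above), keeping the vmalloc special case in a single place.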