Commit 7d6db80b authored by Christoph Hellwig's avatar Christoph Hellwig
Browse files

sparc32: use DMA_DIRECT_REMAP



Use the generic dma remapping allocator instead of open coding it.
This also avoids setting up page tables from irq context which is
generally dangerous and uses the atomic pool instead.

Note that this changes the kernel virtual address at which the
dma coherent memory is mapped from the DVMA_VADDR region to the general
vmalloc pool.  I could not find any indication that this matters
for the hardware.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Andreas Larsson <andreas@gaisler.com>
Acked-by: David S. Miller <davem@davemloft.net>
parent 837e80b3
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -53,8 +53,9 @@ config SPARC32
	def_bool !64BIT
	select ARCH_32BIT_OFF_T
	select ARCH_HAS_SYNC_DMA_FOR_CPU
	select GENERIC_ATOMIC64
	select CLZ_TAB
	select DMA_DIRECT_REMAP
	select GENERIC_ATOMIC64
	select HAVE_UID16
	select OLD_SIGACTION
	select ZONE_DMA
+0 −54
Original line number Diff line number Diff line
@@ -300,60 +300,6 @@ arch_initcall(sparc_register_ioport);

#endif /* CONFIG_SBUS */


/* Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 *
 * Returns the kernel virtual address of the new mapping and stores the
 * physical address of the backing pages in *dma_handle, or returns NULL
 * on failure.
 */
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long addr;
	void *va;

	/* Reject zero-sized requests and anything larger than what a
	 * single __get_free_pages() call can satisfy. */
	if (!size || size > 256 * 1024)	/* __get_free_pages() limit */
		return NULL;

	size = PAGE_ALIGN(size);
	va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));
	if (!va) {
		/* KERN_ERR: a bare printk has no log level and may be
		 * filtered at the default console loglevel. */
		printk(KERN_ERR "%s: no %zd pages\n", __func__,
		       size >> PAGE_SHIFT);
		return NULL;
	}

	/* Reserve a window in the DMA virtual region for this buffer. */
	addr = sparc_dma_alloc_resource(dev, size);
	if (!addr)
		goto err_nomem;

	/* Map the freshly allocated pages at the reserved address. */
	srmmu_mapiorange(0, virt_to_phys(va), addr, size);

	/* The device addresses the buffer by its physical address. */
	*dma_handle = virt_to_phys(va);
	return (void *)addr;

err_nomem:
	free_pages((unsigned long)va, get_order(size));
	return NULL;
}

/* Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned by arch_dma_alloc, size must be the same as
 * what was passed into arch_dma_alloc, and likewise dma_addr must be the
 * same as what *dma_handle was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	size = PAGE_ALIGN(size);

	/* Release the DMA resource region; bail out if cpu_addr was not
	 * one of our allocations (mirrors arch_dma_alloc's reservation). */
	if (!sparc_dma_free_resource(cpu_addr, size))
		return;

	/* Tear down the SRMMU mapping before freeing the backing pages;
	 * dma_addr holds the physical address of those pages. */
	srmmu_unmapiorange((unsigned long)cpu_addr, size);
	free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
}

/*
 * IIep is write-through, not flushing on cpu to device transfer.
 *