Commit 172292be authored by Christoph Hellwig, committed by Jason Gunthorpe
Browse files

dma-mapping: remove dma_virt_ops

Now that the RDMA core deals with devices that only do DMA mapping in
lower layers properly, there is no user for dma_virt_ops and it can be
removed.

Link: https://lore.kernel.org/r/20201106181941.1878556-11-hch@lst.de


Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 73063ec5
Loading
Loading
Loading
Loading
+0 −2
Original line number Diff line number Diff line
@@ -565,6 +565,4 @@ static inline int dma_mmap_wc(struct device *dev,
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
		dma_addr_t dma_start, u64 size);

extern const struct dma_map_ops dma_virt_ops;

#endif /* _LINUX_DMA_MAPPING_H */
+0 −5
Original line number Diff line number Diff line
@@ -75,11 +75,6 @@ config ARCH_HAS_DMA_PREP_COHERENT
config ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool

config DMA_VIRT_OPS
	bool
	depends on HAS_DMA
	select DMA_OPS

config SWIOTLB
	bool
	select NEED_DMA_MAP_STATE
+0 −1
Original line number Diff line number Diff line
@@ -5,7 +5,6 @@ obj-$(CONFIG_DMA_OPS) += ops_helpers.o
obj-$(CONFIG_DMA_OPS)			+= dummy.o
obj-$(CONFIG_DMA_CMA)			+= contiguous.o
obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
obj-$(CONFIG_DMA_API_DEBUG)		+= debug.o
obj-$(CONFIG_SWIOTLB)			+= swiotlb.o
obj-$(CONFIG_DMA_COHERENT_POOL)		+= pool.o

kernel/dma/virt.c

deleted, mode 100644 → 0
+0 −61
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map to virtual addresses without flushing memory.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>

/*
 * Allocate zeroed, page-aligned memory and report its kernel virtual
 * address as the "DMA" handle — no real device mapping is performed.
 * @dev and @attrs are unused; the handle is simply the allocation's
 * virtual address cast to dma_addr_t.
 */
static void *dma_virt_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp,
			    unsigned long attrs)
{
	void *vaddr = (void *)__get_free_pages(gfp | __GFP_ZERO,
					       get_order(size));

	if (!vaddr)
		return NULL;
	*dma_handle = (uintptr_t)vaddr;
	return vaddr;
}

/*
 * Release memory obtained from dma_virt_alloc(). The order is
 * recomputed from @size, so the caller must pass the original size.
 * @dev, @dma_addr and @attrs are unused.
 */
static void dma_virt_free(struct device *dev, size_t size,
			  void *cpu_addr, dma_addr_t dma_addr,
			  unsigned long attrs)
{
	unsigned int order = get_order(size);

	free_pages((unsigned long)cpu_addr, order);
}

/*
 * "Map" a page by returning its kernel virtual address (plus @offset)
 * as the DMA address. No cache maintenance or IOMMU work is done;
 * @dev, @size, @dir and @attrs are unused.
 */
static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;

	return (uintptr_t)vaddr;
}

/*
 * "Map" a scatterlist: each entry's DMA address becomes its kernel
 * virtual address and its DMA length its plain length. Entries must
 * have a backing page (BUG otherwise). Always succeeds, returning
 * @nents; @dev, @dir and @attrs are unused.
 */
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/* A virtual "mapping" requires page-backed memory. */
		BUG_ON(!sg_page(sg));
		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
		sg_dma_len(sg) = sg->length;
	}
	return nents;
}

const struct dma_map_ops dma_virt_ops = {
	.alloc			= dma_virt_alloc,
	.free			= dma_virt_free,
	.map_page		= dma_virt_map_page,
	.map_sg			= dma_virt_map_sg,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
};
EXPORT_SYMBOL(dma_virt_ops);