Commit 3bf3710e authored by Thomas Hellström

drm/ttm: Add a generic TTM memcpy move for page-based iomem



The internal ttm_bo_util memcpy uses ioremap functionality, and while it
might be possible to use it for copying in and out of sglist-represented
io memory using the io_mem_reserve() / io_mem_free() callbacks, that
would cause problems with fault(). Instead, implement a method that maps
page-by-page using kmap_local() semantics. As an additional benefit, we
then avoid the occasional global TLB flushes of ioremap() and the
consumption of ioremap space, eliminating a critical point of failure.
With a slight change of semantics we could also push the memcpy out
async for testing and async driver development purposes.

A special linear iomem iterator is introduced internally to mimic the
old ioremap behaviour for code-paths that can't immediately be ported
over. This adds to the code size and should be considered a temporary
solution.

Looking at the code, we have a lot of checks for iomap-tagged pointers.
Ideally we should extend the core memremap functions to also accept
uncached memory and provide kmap_local functionality. Then we could
strip a lot of code.
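
For orientation, the iterator contract the new code programs against looks
roughly like this (a sketch inferred from the call sites in this patch; the
authoritative definition is struct ttm_kmap_iter_ops in
include/drm/ttm/ttm_kmap_iter.h, which is not shown in this excerpt):

	/* Sketch only: shape inferred from the call sites below. */
	struct ttm_kmap_iter_ops {
		void (*map_local)(struct ttm_kmap_iter *res_iter,
				  struct dma_buf_map *dmap, pgoff_t i);
		void (*unmap_local)(struct ttm_kmap_iter *res_iter,
				    struct dma_buf_map *dmap);
		bool maps_tt;
	};

	struct ttm_kmap_iter {
		const struct ttm_kmap_iter_ops *ops;
	};

Each resource type supplies these ops, so the memcpy path can map and
unmap one page at a time regardless of whether the backing is system
pages or io memory.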

Cc: Christian König <christian.koenig@amd.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210602083818.241793-4-thomas.hellstrom@linux.intel.com
parent c43f2f98
drivers/gpu/drm/ttm/ttm_bo_util.c +98 −181
@@ -72,188 +72,125 @@ void ttm_mem_io_free(struct ttm_device *bdev,
 	mem->bus.addr = NULL;
 }
 
-static int ttm_resource_ioremap(struct ttm_device *bdev,
-			       struct ttm_resource *mem,
-			       void **virtual)
-{
-	int ret;
-	void *addr;
-
-	*virtual = NULL;
-	ret = ttm_mem_io_reserve(bdev, mem);
-	if (ret || !mem->bus.is_iomem)
-		return ret;
-
-	if (mem->bus.addr) {
-		addr = mem->bus.addr;
-	} else {
-		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
-
-		if (mem->bus.caching == ttm_write_combined)
-			addr = ioremap_wc(mem->bus.offset, bus_size);
-#ifdef CONFIG_X86
-		else if (mem->bus.caching == ttm_cached)
-			addr = ioremap_cache(mem->bus.offset, bus_size);
-#endif
-		else
-			addr = ioremap(mem->bus.offset, bus_size);
-		if (!addr) {
-			ttm_mem_io_free(bdev, mem);
-			return -ENOMEM;
-		}
-	}
-	*virtual = addr;
-	return 0;
-}
-
-static void ttm_resource_iounmap(struct ttm_device *bdev,
-				struct ttm_resource *mem,
-				void *virtual)
-{
-	if (virtual && mem->bus.addr == NULL)
-		iounmap(virtual);
-	ttm_mem_io_free(bdev, mem);
-}
-
-static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
-{
-	uint32_t *dstP =
-	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
-	uint32_t *srcP =
-	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
-
-	int i;
-	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
-		iowrite32(ioread32(srcP++), dstP++);
-	return 0;
-}
-
-static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
-				unsigned long page,
-				pgprot_t prot)
-{
-	struct page *d = ttm->pages[page];
-	void *dst;
-
-	if (!d)
-		return -ENOMEM;
-
-	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-	dst = kmap_atomic_prot(d, prot);
-	if (!dst)
-		return -ENOMEM;
-
-	memcpy_fromio(dst, src, PAGE_SIZE);
-
-	kunmap_atomic(dst);
-
-	return 0;
-}
-
-static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
-				unsigned long page,
-				pgprot_t prot)
-{
-	struct page *s = ttm->pages[page];
-	void *src;
-
-	if (!s)
-		return -ENOMEM;
-
-	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-	src = kmap_atomic_prot(s, prot);
-	if (!src)
-		return -ENOMEM;
-
-	memcpy_toio(dst, src, PAGE_SIZE);
-
-	kunmap_atomic(src);
-
-	return 0;
-}
+/**
+ * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
+ * @bo: The struct ttm_buffer_object.
+ * @num_pages: The number of pages to copy.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @src_iter: A struct ttm_kmap_iter representing the source resource.
+ *
+ * This function is intended to be able to move out async under a
+ * dma-fence if desired.
+ */
+void ttm_move_memcpy(struct ttm_buffer_object *bo,
+		     u32 num_pages,
+		     struct ttm_kmap_iter *dst_iter,
+		     struct ttm_kmap_iter *src_iter)
+{
+	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
+	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
+	struct ttm_tt *ttm = bo->ttm;
+	struct dma_buf_map src_map, dst_map;
+	pgoff_t i;
+
+	/* Single TTM move. NOP */
+	if (dst_ops->maps_tt && src_ops->maps_tt)
+		return;
+
+	/* Don't move nonexistent data. Clear destination instead. */
+	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
+		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+			return;
+
+		for (i = 0; i < num_pages; ++i) {
+			dst_ops->map_local(dst_iter, &dst_map, i);
+			if (dst_map.is_iomem)
+				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
+			else
+				memset(dst_map.vaddr, 0, PAGE_SIZE);
+			if (dst_ops->unmap_local)
+				dst_ops->unmap_local(dst_iter, &dst_map);
+		}
+		return;
+	}
+
+	for (i = 0; i < num_pages; ++i) {
+		dst_ops->map_local(dst_iter, &dst_map, i);
+		src_ops->map_local(src_iter, &src_map, i);
+
+		if (!src_map.is_iomem && !dst_map.is_iomem) {
+			memcpy(dst_map.vaddr, src_map.vaddr, PAGE_SIZE);
+		} else if (!src_map.is_iomem) {
+			dma_buf_map_memcpy_to(&dst_map, src_map.vaddr,
+					      PAGE_SIZE);
+		} else if (!dst_map.is_iomem) {
+			memcpy_fromio(dst_map.vaddr, src_map.vaddr_iomem,
+				      PAGE_SIZE);
+		} else {
+			int j;
+			u32 __iomem *src = src_map.vaddr_iomem;
+			u32 __iomem *dst = dst_map.vaddr_iomem;
+
+			for (j = 0; j < (PAGE_SIZE / sizeof(u32)); ++j)
+				iowrite32(ioread32(src++), dst++);
+		}
+
+		if (src_ops->unmap_local)
+			src_ops->unmap_local(src_iter, &src_map);
+		if (dst_ops->unmap_local)
+			dst_ops->unmap_local(dst_iter, &dst_map);
+	}
+}
+EXPORT_SYMBOL(ttm_move_memcpy);
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 		       struct ttm_operation_ctx *ctx,
-		       struct ttm_resource *new_mem)
+		       struct ttm_resource *dst_mem)
 {
-	struct ttm_resource *old_mem = bo->resource;
 	struct ttm_device *bdev = bo->bdev;
-	struct ttm_resource_manager *man;
+	struct ttm_resource_manager *dst_man =
+		ttm_manager_type(bo->bdev, dst_mem->mem_type);
 	struct ttm_tt *ttm = bo->ttm;
-	void *old_iomap;
-	void *new_iomap;
-	int ret;
-	unsigned long i;
+	struct ttm_resource *src_mem = bo->resource;
+	struct ttm_resource_manager *src_man =
+		ttm_manager_type(bdev, src_mem->mem_type);
+	struct ttm_resource src_copy = *src_mem;
+	union {
+		struct ttm_kmap_iter_tt tt;
+		struct ttm_kmap_iter_linear_io io;
+	} _dst_iter, _src_iter;
+	struct ttm_kmap_iter *dst_iter, *src_iter;
+	int ret = 0;
 
-	man = ttm_manager_type(bdev, new_mem->mem_type);
-
-	ret = ttm_bo_wait_ctx(bo, ctx);
-	if (ret)
-		return ret;
-
-	ret = ttm_resource_ioremap(bdev, old_mem, &old_iomap);
-	if (ret)
-		return ret;
-	ret = ttm_resource_ioremap(bdev, new_mem, &new_iomap);
-	if (ret)
-		goto out;
-
-	/*
-	 * Single TTM move. NOP.
-	 */
-	if (old_iomap == NULL && new_iomap == NULL)
-		goto out1;
-
-	/*
-	 * Don't move nonexistent data. Clear destination instead.
-	 */
-	if (old_iomap == NULL &&
-	    (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
-			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
-		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
-		goto out1;
-	}
-
-	/*
-	 * TTM might be null for moves within the same region.
-	 */
-	if (ttm) {
+	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+		    dst_man->use_tt)) {
 		ret = ttm_tt_populate(bdev, ttm, ctx);
 		if (ret)
-			goto out1;
+			return ret;
 	}
 
-	for (i = 0; i < new_mem->num_pages; ++i) {
-		if (old_iomap == NULL) {
-			pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
-			ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
-						   prot);
-		} else if (new_iomap == NULL) {
-			pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
-			ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
-						   prot);
-		} else {
-			ret = ttm_copy_io_page(new_iomap, old_iomap, i);
-		}
-		if (ret)
-			break;
-	}
-	mb();
-out1:
-	ttm_resource_iounmap(bdev, new_mem, new_iomap);
-out:
-	ttm_resource_iounmap(bdev, old_mem, old_iomap);
-
-	if (ret) {
-		ttm_resource_free(bo, &new_mem);
-		return ret;
-	}
+	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
+	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
+		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
+	if (IS_ERR(dst_iter))
+		return PTR_ERR(dst_iter);
+
+	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
+	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
+		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
+	if (IS_ERR(src_iter)) {
+		ret = PTR_ERR(src_iter);
+		goto out_src_iter;
+	}
 
-	ttm_resource_free(bo, &bo->resource);
-	ttm_bo_assign_mem(bo, new_mem);
+	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+	src_copy = *src_mem;
+	ttm_bo_move_sync_cleanup(bo, dst_mem);
 
-	if (!man->use_tt)
-		ttm_bo_tt_destroy(bo);
+	if (!src_iter->ops->maps_tt)
+		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, &src_copy);
+out_src_iter:
+	if (!dst_iter->ops->maps_tt)
+		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
 
 	return ret;
 }
@@ -335,27 +272,7 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
 	man = ttm_manager_type(bo->bdev, res->mem_type);
 	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
 
-	/* Cached mappings need no adjustment */
-	if (caching == ttm_cached)
-		return tmp;
-
-#if defined(__i386__) || defined(__x86_64__)
-	if (caching == ttm_write_combined)
-		tmp = pgprot_writecombine(tmp);
-	else if (boot_cpu_data.x86 > 3)
-		tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
-    defined(__powerpc__) || defined(__mips__)
-	if (caching == ttm_write_combined)
-		tmp = pgprot_writecombine(tmp);
-	else
-		tmp = pgprot_noncached(tmp);
-#endif
-#if defined(__sparc__)
-	tmp = pgprot_noncached(tmp);
-#endif
-	return tmp;
+	return ttm_prot_from_caching(caching, tmp);
 }
 EXPORT_SYMBOL(ttm_io_prot);

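As a usage sketch (hypothetical driver-side code, not part of this patch;
my_drv_copy_vram_to_tt, vram_iomap and vram_base are illustrative names): a
driver whose io memory is sg-list backed can now drive a copy without
ioremapping the whole resource:

	static void my_drv_copy_vram_to_tt(struct ttm_buffer_object *bo,
					   struct io_mapping *vram_iomap,
					   struct sg_table *st,
					   resource_size_t vram_base)
	{
		struct ttm_kmap_iter_iomap src_io;
		struct ttm_kmap_iter_tt dst_tt;
		struct ttm_kmap_iter *src_iter, *dst_iter;

		src_iter = ttm_kmap_iter_iomap_init(&src_io, vram_iomap,
						    st, vram_base);
		dst_iter = ttm_kmap_iter_tt_init(&dst_tt, bo->ttm);

		/* Page-by-page copy using kmap_local semantics. */
		ttm_move_memcpy(bo, bo->resource->num_pages,
				dst_iter, src_iter);
	}

Since ttm_move_memcpy() maps only one page at a time, the same call could
later be issued asynchronously under a dma-fence, which is the direction
the commit message hints at.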
drivers/gpu/drm/ttm/ttm_module.c +35 −0
@@ -31,12 +31,47 @@
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pgtable.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_caching.h>

#include "ttm_module.h"

/**
 * ttm_prot_from_caching - Modify the page protection according to the
 * ttm caching mode
 * @caching: The ttm caching mode
 * @tmp: The original page protection
 *
 * Return: The modified page protection
 */
pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching == ttm_cached)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
	defined(__powerpc__) || defined(__mips__)
	if (caching == ttm_write_combined)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

struct dentry *ttm_debugfs_root;

static int __init ttm_init(void)
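
As a usage sketch (hypothetical, not part of this patch): a driver can now
derive a kernel map protection for a given caching mode directly, e.g.

	/* Hypothetical: pick a vmap protection for a write-combined BO. */
	pgprot_t prot = ttm_prot_from_caching(ttm_write_combined, PAGE_KERNEL);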
drivers/gpu/drm/ttm/ttm_resource.c +193 −0
@@ -22,6 +22,10 @@
 * Authors: Christian König
 */

#include <linux/dma-buf-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>

#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>

@@ -154,3 +158,192 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
		man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);

static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
					  struct dma_buf_map *dmap,
					  pgoff_t i)
{
	struct ttm_kmap_iter_iomap *iter_io =
		container_of(iter, typeof(*iter_io), base);
	void __iomem *addr;

retry:
	while (i >= iter_io->cache.end) {
		iter_io->cache.sg = iter_io->cache.sg ?
			sg_next(iter_io->cache.sg) : iter_io->st->sgl;
		iter_io->cache.i = iter_io->cache.end;
		iter_io->cache.end += sg_dma_len(iter_io->cache.sg) >>
			PAGE_SHIFT;
		iter_io->cache.offs = sg_dma_address(iter_io->cache.sg) -
			iter_io->start;
	}

	if (i < iter_io->cache.i) {
		iter_io->cache.end = 0;
		iter_io->cache.sg = NULL;
		goto retry;
	}

	addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
				       (((resource_size_t)i - iter_io->cache.i)
					<< PAGE_SHIFT));
	dma_buf_map_set_vaddr_iomem(dmap, addr);
}

static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
					    struct dma_buf_map *map)
{
	io_mapping_unmap_local(map->vaddr_iomem);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_io_ops = {
	.map_local =  ttm_kmap_iter_iomap_map_local,
	.unmap_local = ttm_kmap_iter_iomap_unmap_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_iomap_init - Initialize a struct ttm_kmap_iter_iomap
 * @iter_io: The struct ttm_kmap_iter_iomap to initialize.
 * @iomap: The struct io_mapping representing the underlying linear io_memory.
 * @st: sg_table into @iomap, representing the memory of the struct
 * ttm_resource.
 * @start: Offset that needs to be subtracted from @st to make
 * sg_dma_address(st->sgl) - @start == 0 for @iomap start.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start)
{
	iter_io->base.ops = &ttm_kmap_iter_io_ops;
	iter_io->iomap = iomap;
	iter_io->st = st;
	iter_io->start = start;
	memset(&iter_io->cache, 0, sizeof(iter_io->cache));

	return &iter_io->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_iomap_init);
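
A worked example of @start (hypothetical numbers, not from this patch):
suppose @iomap covers a VRAM BAR at bus address 0x100000000 and the
resource's sg_table carries DMA addresses in that same bus space. Passing
the BAR base as @start makes iterator page 0 line up with offset 0 of the
io_mapping:

	/* Hypothetical: st's dma addresses start at the BAR base. */
	iter = ttm_kmap_iter_iomap_init(&iter_io, &vram_iomap, st,
					0x100000000ULL);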

/**
 * DOC: Linear io iterator
 *
 * This code should die in the not too near future. Best would be if we could
 * make io-mapping use memremap for all io memory, and have memremap
 * implement a kmap_local functionality. We could then strip a huge amount of
 * code. These linear io iterators are implemented to mimic old functionality,
 * and they don't use kmap_local semantics at all internally. Rather ioremap or
 * friends, and at least on 32-bit they add global TLB flushes and points
 * of failure.
 */

static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
					      struct dma_buf_map *dmap,
					      pgoff_t i)
{
	struct ttm_kmap_iter_linear_io *iter_io =
		container_of(iter, typeof(*iter_io), base);

	*dmap = iter_io->dmap;
	dma_buf_map_incr(dmap, i * PAGE_SIZE);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
	.map_local =  ttm_kmap_iter_linear_io_map_local,
	.maps_tt = false,
};

/**
 * ttm_kmap_iter_linear_io_init - Initialize an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It sets up a memcpy kmap iterator
 * pointing at a linear chunk of io memory.
 *
 * Return: A pointer to the embedded struct ttm_kmap_iter or error pointer on
 * failure.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	int ret;

	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		goto out_err;
	if (!mem->bus.is_iomem) {
		ret = -EINVAL;
		goto out_io_free;
	}

	if (mem->bus.addr) {
		dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
		iter_io->needs_unmap = false;
	} else {
		size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;

		iter_io->needs_unmap = true;
		memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
		if (mem->bus.caching == ttm_write_combined)
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap_wc(mem->bus.offset,
							       bus_size));
		else if (mem->bus.caching == ttm_cached)
			dma_buf_map_set_vaddr(&iter_io->dmap,
					      memremap(mem->bus.offset, bus_size,
						       MEMREMAP_WB |
						       MEMREMAP_WT |
						       MEMREMAP_WC));

		/* If uncached requested or if mapping cached or wc failed */
		if (dma_buf_map_is_null(&iter_io->dmap))
			dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
						    ioremap(mem->bus.offset,
							    bus_size));

		if (dma_buf_map_is_null(&iter_io->dmap)) {
			ret = -ENOMEM;
			goto out_io_free;
		}
	}

	iter_io->base.ops = &ttm_kmap_iter_linear_io_ops;
	return &iter_io->base;

out_io_free:
	ttm_mem_io_free(bdev, mem);
out_err:
	return ERR_PTR(ret);
}

/**
 * ttm_kmap_iter_linear_io_fini - Clean up an iterator for linear io memory
 * @iter_io: The iterator to initialize
 * @bdev: The TTM device
 * @mem: The ttm resource representing the iomap.
 *
 * This function is for internal TTM use only. It cleans up a memcpy kmap
 * iterator initialized by ttm_kmap_iter_linear_io_init.
 */
void
ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
			     struct ttm_device *bdev,
			     struct ttm_resource *mem)
{
	if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
		if (iter_io->dmap.is_iomem)
			iounmap(iter_io->dmap.vaddr_iomem);
		else
			memunmap(iter_io->dmap.vaddr);
	}

	ttm_mem_io_free(bdev, mem);
}
drivers/gpu/drm/ttm/ttm_tt.c +45 −0
@@ -433,3 +433,48 @@ void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
	if (!ttm_dma32_pages_limit)
		ttm_dma32_pages_limit = num_dma32_pages;
}

static void ttm_kmap_iter_tt_map_local(struct ttm_kmap_iter *iter,
				       struct dma_buf_map *dmap,
				       pgoff_t i)
{
	struct ttm_kmap_iter_tt *iter_tt =
		container_of(iter, typeof(*iter_tt), base);

	dma_buf_map_set_vaddr(dmap, kmap_local_page_prot(iter_tt->tt->pages[i],
							 iter_tt->prot));
}

static void ttm_kmap_iter_tt_unmap_local(struct ttm_kmap_iter *iter,
					 struct dma_buf_map *map)
{
	kunmap_local(map->vaddr);
}

static const struct ttm_kmap_iter_ops ttm_kmap_iter_tt_ops = {
	.map_local = ttm_kmap_iter_tt_map_local,
	.unmap_local = ttm_kmap_iter_tt_unmap_local,
	.maps_tt = true,
};

/**
 * ttm_kmap_iter_tt_init - Initialize a struct ttm_kmap_iter_tt
 * @iter_tt: The struct ttm_kmap_iter_tt to initialize.
 * @tt: Struct ttm_tt holding page pointers of the struct ttm_resource.
 *
 * Return: Pointer to the embedded struct ttm_kmap_iter.
 */
struct ttm_kmap_iter *
ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
		      struct ttm_tt *tt)
{
	iter_tt->base.ops = &ttm_kmap_iter_tt_ops;
	iter_tt->tt = tt;
	if (tt)
		iter_tt->prot = ttm_prot_from_caching(tt->caching, PAGE_KERNEL);
	else
		iter_tt->prot = PAGE_KERNEL;

	return &iter_tt->base;
}
EXPORT_SYMBOL(ttm_kmap_iter_tt_init);
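
A minimal sketch of driving a kmap iterator by hand (hypothetical helper
mirroring the copy loop in ttm_move_memcpy(); my_drv_read_page is an
illustrative name):

	/* Hypothetical: read back one page through any kmap iterator. */
	static void my_drv_read_page(struct ttm_kmap_iter *iter, pgoff_t i,
				     void *out)
	{
		struct dma_buf_map map;

		iter->ops->map_local(iter, &map, i);
		if (map.is_iomem)
			memcpy_fromio(out, map.vaddr_iomem, PAGE_SIZE);
		else
			memcpy(out, map.vaddr, PAGE_SIZE);
		if (iter->ops->unmap_local)
			iter->ops->unmap_local(iter, &map);
	}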
include/drm/ttm/ttm_bo_driver.h +28 −0
@@ -40,6 +40,7 @@
#include <drm/ttm/ttm_device.h>

#include "ttm_bo_api.h"
#include "ttm_kmap_iter.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
@@ -270,6 +271,23 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      bool pipeline,
			      struct ttm_resource *new_mem);

/**
 * ttm_bo_move_sync_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
					    struct ttm_resource *new_mem)
{
	int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);

	WARN_ON(ret);
}

/**
 * ttm_bo_pipeline_gutting.
 *
@@ -304,4 +322,14 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
 */
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

void ttm_move_memcpy(struct ttm_buffer_object *bo,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);

struct ttm_kmap_iter *
ttm_kmap_iter_iomap_init(struct ttm_kmap_iter_iomap *iter_io,
			 struct io_mapping *iomap,
			 struct sg_table *st,
			 resource_size_t start);
#endif