Commit 88be9a0a authored by Matthew Auld

drm/i915/ttm: add ttm_buddy_man



Add back our standalone i915_buddy allocator and integrate it into a
ttm_resource_manager. This will plug into our ttm backend for managing
device local-memory in the next couple of patches.

v2(Thomas):
    - Return -ENOSPC from the buddy; ttm expects this in order to
      trigger eviction
    - Drop the unnecessary inline
    - bo->page_alignment is in page units

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Acked-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210616152501.394518-1-matthew.auld@intel.com
parent c865204e
drivers/gpu/drm/i915/Makefile +2 −0
@@ -162,6 +162,7 @@ gem-y += \
i915-y += \
	  $(gem-y) \
	  i915_active.o \
	  i915_buddy.o \
	  i915_cmd_parser.o \
	  i915_gem_evict.o \
	  i915_gem_gtt.o \
@@ -171,6 +172,7 @@ i915-y += \
	  i915_request.o \
	  i915_scheduler.o \
	  i915_trace_points.o \
	  i915_ttm_buddy_manager.o \
	  i915_vma.o \
	  intel_wopcm.o

drivers/gpu/drm/i915/i915_buddy.c +412 −0
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>

#include "i915_buddy.h"

#include "i915_gem.h"
#include "i915_utils.h"

static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
						 struct i915_buddy_block *parent,
						 unsigned int order,
						 u64 offset)
{
	struct i915_buddy_block *block;

	GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(mm->slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
	return block;
}

static void i915_block_free(struct i915_buddy_mm *mm,
			    struct i915_buddy_block *block)
{
	kmem_cache_free(mm->slab_blocks, block);
}

static void mark_allocated(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct i915_buddy_mm *mm,
		      struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_FREE;

	list_add(&block->link,
		 &mm->free_list[i915_buddy_block_order(block)]);
}

static void mark_split(struct i915_buddy_block *block)
{
	block->header &= ~I915_BUDDY_HEADER_STATE;
	block->header |= I915_BUDDY_SPLIT;

	list_del(&block->link);
}

int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);

	mm->slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
	if (!mm->slab_blocks)
		return -ENOMEM;

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		goto out_destroy_slab;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct i915_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct i915_buddy_block *root;
		unsigned int order;
		u64 root_size;

		root_size = rounddown_pow_of_two(size);
		order = ilog2(root_size) - ilog2(chunk_size);

		root = i915_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		GEM_BUG_ON(i > mm->max_order);
		GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		i915_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
out_destroy_slab:
	kmem_cache_destroy(mm->slab_blocks);
	return -ENOMEM;
}
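
/*
 * A worked example of the splitting above (illustrative only): with
 * i915_buddy_init(&mm, SZ_32M + SZ_8M, SZ_4K) the 40M space has
 * hweight64(SZ_32M + SZ_8M) == 2 set bits, so we get two roots: a 32M
 * block of order 13 at offset 0, and an 8M block of order 11 at offset
 * 32M. Every root is a power-of-two block, so later splits keep natural
 * power-of-two alignment.
 */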

void i915_buddy_fini(struct i915_buddy_mm *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
		i915_block_free(mm, mm->roots[i]);
	}

	kfree(mm->roots);
	kfree(mm->free_list);
	kmem_cache_destroy(mm->slab_blocks);
}

static int split_block(struct i915_buddy_mm *mm,
		       struct i915_buddy_block *block)
{
	unsigned int block_order = i915_buddy_block_order(block) - 1;
	u64 offset = i915_buddy_block_offset(block);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));
	GEM_BUG_ON(!i915_buddy_block_order(block));

	block->left = i915_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = i915_block_alloc(mm, block, block_order,
					offset + (mm->chunk_size << block_order));
	if (!block->right) {
		i915_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}

static struct i915_buddy_block *
get_buddy(struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

static void __i915_buddy_free(struct i915_buddy_mm *mm,
			      struct i915_buddy_block *block)
{
	struct i915_buddy_block *parent;

	while ((parent = block->parent)) {
		struct i915_buddy_block *buddy;

		buddy = get_buddy(block);

		if (!i915_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		i915_block_free(mm, block);
		i915_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}

void i915_buddy_free(struct i915_buddy_mm *mm,
		     struct i915_buddy_block *block)
{
	GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
	__i915_buddy_free(mm, block);
}

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
{
	struct i915_buddy_block *block, *on;

	list_for_each_entry_safe(block, on, objects, link) {
		i915_buddy_free(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

/*
 * Allocate power-of-two block. The order value here translates to:
 *
 *   0 = 2^0 * mm->chunk_size
 *   1 = 2^1 * mm->chunk_size
 *   2 = 2^2 * mm->chunk_size
 *   ...
 */
struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
{
	struct i915_buddy_block *block = NULL;
	unsigned int i;
	int err;

	for (i = order; i <= mm->max_order; ++i) {
		block = list_first_entry_or_null(&mm->free_list[i],
						 struct i915_buddy_block,
						 link);
		if (block)
			break;
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	GEM_BUG_ON(!i915_buddy_block_is_free(block));

	while (i != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto out_free;

		/* Go low */
		block = block->left;
		i--;
	}

	mark_allocated(block);
	kmemleak_update_trace(block);
	return block;

out_free:
	if (i != order)
		__i915_buddy_free(mm, block);
	return ERR_PTR(err);
}
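
/*
 * For illustration, with a 4K chunk_size a caller wanting a single 2M
 * block would ask for order 9, since 2M == 2^9 * 4K:
 *
 *	block = i915_buddy_alloc(mm, ilog2(SZ_2M) - ilog2(mm->chunk_size));
 *	if (IS_ERR(block))
 *		return PTR_ERR(block);
 *
 * The -ENOSPC return (rather than -ENOMEM) is deliberate: ttm treats it
 * as "no space in this manager" and responds by triggering eviction.
 */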

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

/*
 * Allocate range. Note that it's safe to chain together multiple alloc_ranges
 * with the same blocks list.
 *
 * Intended for pre-allocating portions of the address space, for example to
 * reserve a block for the initial framebuffer or similar. The expectation
 * here is that i915_buddy_alloc() remains the main vehicle for allocations;
 * if that's not the case, the drm_mm range allocator is probably a much
 * better fit, and you should use that instead.
 */
int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
			   struct list_head *blocks,
			   u64 start, u64 size)
{
	struct i915_buddy_block *block;
	struct i915_buddy_block *buddy;
	LIST_HEAD(allocated);
	LIST_HEAD(dfs);
	u64 end;
	int err;
	int i;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (!IS_ALIGNED(size | start, mm->chunk_size))
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct i915_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = i915_buddy_block_offset(block);
		block_end = block_start + i915_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (i915_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (!i915_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		if (!i915_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = get_buddy(block);
	if (buddy &&
	    (i915_buddy_block_is_free(block) &&
	     i915_buddy_block_is_free(buddy)))
		__i915_buddy_free(mm, block);

err_free:
	i915_buddy_free_list(mm, &allocated);
	return err;
}
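
/*
 * A minimal usage sketch, assuming the caller keeps its own list of
 * reserved blocks:
 *
 *	LIST_HEAD(reserved);
 *	int err;
 *
 *	err = i915_buddy_alloc_range(mm, &reserved, 0, SZ_1M);
 *	if (err)
 *		return err;
 *
 *	... later, return the whole range to the mm ...
 *	i915_buddy_free_list(mm, &reserved);
 */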

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif
drivers/gpu/drm/i915/i915_buddy.h +133 −0
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_BUDDY_H__
#define __I915_BUDDY_H__

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/slab.h>

struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
#define   I915_BUDDY_ALLOCATED	   (1 << 10)
#define   I915_BUDDY_FREE	   (2 << 10)
#define   I915_BUDDY_SPLIT	   (3 << 10)
/* Free to be used, if needed in the future */
#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
#define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(5, 0)
	u64 header;

	struct i915_buddy_block *left;
	struct i915_buddy_block *right;
	struct i915_buddy_block *parent;

	void *private; /* owned by creator */

	/*
	 * While the block is allocated by the user through i915_buddy_alloc*,
	 * the user has ownership of the link, for example to maintain within
	 * a list, if so desired. As soon as the block is freed with
	 * i915_buddy_free* ownership is given back to the mm.
	 */
	struct list_head link;
	struct list_head tmp_link;
};
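
/*
 * Example header encoding (illustrative): an allocated block of order 4
 * at offset SZ_1M packs as
 *
 *	0x100000 | I915_BUDDY_ALLOCATED | 4 == 0x100404
 *
 * with the offset in bits [63:12], the state in bits [11:10] and the
 * order in bits [5:0].
 */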

/* Order-zero must be at least PAGE_SIZE */
#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)

/*
 * Binary Buddy System.
 *
 * Locking should be handled by the user, a simple mutex around
 * i915_buddy_alloc* and i915_buddy_free* should suffice.
 */
struct i915_buddy_mm {
	struct kmem_cache *slab_blocks;
	/* Maintain a free list for each order. */
	struct list_head *free_list;

	/*
	 * Maintain explicit binary tree(s) to track the allocation of the
	 * address space. This gives us a simple way of finding a buddy block
	 * and performing the potentially recursive merge step when freeing a
	 * block. Nodes are either allocated, free or split; free nodes will
	 * also sit on the respective per-order free list.
	 */
	struct i915_buddy_block **roots;

	/*
	 * Anything from here is public, and remains static for the lifetime of
	 * the mm. Everything above is considered do-not-touch.
	 */
	unsigned int n_roots;
	unsigned int max_order;

	/* Must be at least PAGE_SIZE */
	u64 chunk_size;
	u64 size;
};
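
/*
 * A minimal lifecycle sketch, with the caller providing the mutex as per
 * the locking note above (lock is a hypothetical caller-owned mutex):
 *
 *	err = i915_buddy_init(&mm, size, SZ_4K);
 *	if (err)
 *		return err;
 *
 *	mutex_lock(&lock);
 *	block = i915_buddy_alloc(&mm, order);
 *	mutex_unlock(&lock);
 *
 *	if (!IS_ERR(block)) {
 *		mutex_lock(&lock);
 *		i915_buddy_free(&mm, block);
 *		mutex_unlock(&lock);
 *	}
 *
 *	i915_buddy_fini(&mm);
 */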

static inline u64
i915_buddy_block_offset(struct i915_buddy_block *block)
{
	return block->header & I915_BUDDY_HEADER_OFFSET;
}

static inline unsigned int
i915_buddy_block_order(struct i915_buddy_block *block)
{
	return block->header & I915_BUDDY_HEADER_ORDER;
}

static inline unsigned int
i915_buddy_block_state(struct i915_buddy_block *block)
{
	return block->header & I915_BUDDY_HEADER_STATE;
}

static inline bool
i915_buddy_block_is_allocated(struct i915_buddy_block *block)
{
	return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
}

static inline bool
i915_buddy_block_is_free(struct i915_buddy_block *block)
{
	return i915_buddy_block_state(block) == I915_BUDDY_FREE;
}

static inline bool
i915_buddy_block_is_split(struct i915_buddy_block *block)
{
	return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
}

static inline u64
i915_buddy_block_size(struct i915_buddy_mm *mm,
		      struct i915_buddy_block *block)
{
	return mm->chunk_size << i915_buddy_block_order(block);
}

int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);

void i915_buddy_fini(struct i915_buddy_mm *mm);

struct i915_buddy_block *
i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);

int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
			   struct list_head *blocks,
			   u64 start, u64 size);

void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);

void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);

#endif
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +248 −0
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_buddy.h"
#include "i915_gem.h"

struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct i915_buddy_mm mm;
	struct list_head reserved;
	struct mutex lock;
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct i915_buddy_mm *mm = &bman->mm;
	unsigned long n_pages;
	unsigned int min_order;
	u64 min_page_size;
	u64 size;
	int err;

	GEM_BUG_ON(place->fpfn || place->lpfn);

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bo->page_alignment << PAGE_SHIFT;
	GEM_BUG_ON(min_page_size < mm->chunk_size);
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mm->chunk_size);
	}

	if (size > mm->size) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mm->max_order);
		GEM_BUG_ON(order < min_order);

		do {
			mutex_lock(&bman->lock);
			block = i915_buddy_alloc(mm, order);
			mutex_unlock(&bman->lock);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				err = -ENOSPC;
				goto err_free_blocks;
			}
		} while (1);

		n_pages -= BIT(order);

		list_add_tail(&block->link, &bman_res->blocks);

		if (!n_pages)
			break;
	} while (1);

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	kfree(bman_res);
	return err;
}
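
/*
 * For illustration, with a 4K chunk_size a 24K request gives
 * n_pages == 6, so the loop above first takes an order-2 block (16K,
 * since fls(6) - 1 == 2) and then an order-1 block (8K). With
 * TTM_PL_FLAG_CONTIGUOUS set, the 24K is instead rounded up to 32K and
 * a single order-3 block is demanded.
 */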

static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(&bman->mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);

	kfree(bman_res);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
};


/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @chunk_size: The minimum page size in bytes for our allocations, i.e. the
 * order-zero block size
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property where allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally starts from zero, this should be fine. Also,
 * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
 * if, say, there is some unusable range at the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if the @size is not aligned to the @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = i915_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}
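
/*
 * A setup sketch (hypothetical call site; the actual wiring for device
 * local-memory lands in the next couple of patches):
 *
 *	err = i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM, false,
 *				      lmem_size, SZ_64K);
 *	if (err)
 *		return err;
 *
 * where lmem_size is the size in bytes of the region being managed.
 */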

/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
 * also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	i915_buddy_free_list(mm, &bman->reserved);
	i915_buddy_fini(mm);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_buddy_mm *mm = &bman->mm;
	int ret;

	mutex_lock(&bman->lock);
	ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
	mutex_unlock(&bman->lock);

	return ret;
}
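
/*
 * For example, a caller could keep some unusable range at the start of
 * the region out of circulation (hypothetical usage):
 *
 *	err = i915_ttm_buddy_man_reserve(man, 0, SZ_1M);
 *
 * The reserved blocks sit on bman->reserved until
 * i915_ttm_buddy_man_fini() hands them back to the mm.
 */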
drivers/gpu/drm/i915/i915_ttm_buddy_manager.h +56 −0
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_TTM_BUDDY_MANAGER_H__
#define __I915_TTM_BUDDY_MANAGER_H__

#include <linux/list.h>
#include <linux/types.h>

#include <drm/ttm/ttm_resource.h>

struct ttm_device;
struct ttm_resource_manager;
struct i915_buddy_mm;

/**
 * struct i915_ttm_buddy_resource
 *
 * @base: struct ttm_resource base class we extend
 * @blocks: the list of struct i915_buddy_block for this resource/allocation
 * @mm: the struct i915_buddy_mm for this resource
 *
 * Extends the struct ttm_resource to manage an address space allocation with
 * one or more struct i915_buddy_block.
 */
struct i915_ttm_buddy_resource {
	struct ttm_resource base;
	struct list_head blocks;
	struct i915_buddy_mm *mm;
};

/**
 * to_ttm_buddy_resource
 *
 * @res: the resource to upcast
 *
 * Upcast the struct ttm_resource object into a struct i915_ttm_buddy_resource.
 */
static inline struct i915_ttm_buddy_resource *
to_ttm_buddy_resource(struct ttm_resource *res)
{
	return container_of(res, struct i915_ttm_buddy_resource, base);
}
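
/*
 * An illustrative sketch of walking the blocks backing a resource:
 *
 *	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
 *	struct i915_buddy_block *block;
 *
 *	list_for_each_entry(block, &bman_res->blocks, link) {
 *		u64 offset = i915_buddy_block_offset(block);
 *		u64 size = i915_buddy_block_size(bman_res->mm, block);
 *		...
 *	}
 *
 * (the i915_buddy_block helpers live in i915_buddy.h)
 */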

int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 chunk_size);
int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
			    unsigned int type);

int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size);

#endif