Commit d1487389 authored by Thomas Hellström, committed by Matthew Auld

drm/i915/ttm: Initialize the ttm device and memory managers



Temporarily remove the buddy allocator and related selftests
and hook up the TTM range manager for i915 regions.

Also modify the mock region selftests somewhat to account for a
fragmenting manager.
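As background on the range-manager hookup: TTM's built-in range manager is registered per memory type on the ttm_device and then hands out drm_mm ranges for buffer placement. A minimal sketch, assuming the v5.13 TTM API (ttm_range_man_init()/ttm_range_man_fini() are the real entry points; the wrapper name and the page-count math are illustrative, not from this patch):

	/* Sketch: register TTM's range manager for one memory type.
	 * The wrapper name is hypothetical; only ttm_range_man_init()
	 * is the actual TTM API being hooked up by this series. */
	static int example_init_range_manager(struct ttm_device *bdev,
					      unsigned int mem_type,
					      resource_size_t region_size)
	{
		/* use_tt = false: this manages VRAM-like memory, not
		 * system (TT) pages. Size is in pages. */
		return ttm_range_man_init(bdev, mem_type, false,
					  region_size >> PAGE_SHIFT);
	}

Teardown on driver unload would go through ttm_range_man_fini() for the same memory type.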

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602083818.241793-2-thomas.hellstrom@linux.intel.com
parent 0e4fe0c9
drivers/gpu/drm/i915/Kconfig (+1 −0)
@@ -26,6 +26,7 @@ config DRM_I915
 	select SND_HDA_I915 if SND_HDA_CORE
 	select CEC_CORE if CEC_NOTIFIER
 	select VMAP_PFN
+	select DRM_TTM
 	help
 	  Choose this option if you have a system that has "Intel Graphics
 	  Media Accelerator" or "HD Graphics" integrated graphics,
drivers/gpu/drm/i915/Makefile (+1 −1)
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
 	  intel_memory_region.o \
 	  intel_pch.o \
 	  intel_pm.o \
+	  intel_region_ttm.o \
 	  intel_runtime_pm.o \
 	  intel_sideband.o \
 	  intel_step.o \
@@ -160,7 +161,6 @@ gem-y += \
 i915-y += \
 	  $(gem-y) \
 	  i915_active.o \
-	  i915_buddy.o \
 	  i915_cmd_parser.o \
 	  i915_gem_evict.o \
 	  i915_gem_gtt.o \
drivers/gpu/drm/i915/gem/i915_gem_lmem.c (+59 −2)
@@ -4,16 +4,73 @@
  */
 
 #include "intel_memory_region.h"
+#include "intel_region_ttm.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_lmem.h"
 #include "i915_drv.h"
 
+static void lmem_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
+{
+	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+	obj->mm.dirty = false;
+	sg_free_table(pages);
+	kfree(pages);
+}
+
+static int lmem_get_pages(struct drm_i915_gem_object *obj)
+{
+	unsigned int flags;
+	struct sg_table *pages;
+
+	flags = I915_ALLOC_MIN_PAGE_SIZE;
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags |= I915_ALLOC_CONTIGUOUS;
+
+	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
+							 obj->base.size,
+							 flags);
+	if (IS_ERR(obj->mm.st_mm_node))
+		return PTR_ERR(obj->mm.st_mm_node);
+
+	/* Range manager is always contiguous */
+	if (obj->mm.region->is_range_manager)
+		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
+	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+	if (IS_ERR(pages)) {
+		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+		return PTR_ERR(pages);
+	}
+
+	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+
+	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
+		void __iomem *vaddr =
+			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+
+		if (!vaddr) {
+			struct sg_table *pages =
+				__i915_gem_object_unset_pages(obj);
+
+			if (!IS_ERR_OR_NULL(pages))
+				lmem_put_pages(obj, pages);
+			/* Don't fall through to memset_io() with a NULL map */
+			return -ENOMEM;
+		}
+
+		memset_io(vaddr, 0, obj->base.size);
+		io_mapping_unmap(vaddr);
+	}
+
+	return 0;
+}
+
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
 	.name = "i915_gem_object_lmem",
 	.flags = I915_GEM_OBJECT_HAS_IOMEM,
 
-	.get_pages = i915_gem_object_get_pages_buddy,
-	.put_pages = i915_gem_object_put_pages_buddy,
+	.get_pages = lmem_get_pages,
+	.put_pages = lmem_put_pages,
 	.release = i915_gem_object_release_memory_region,
 };
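For orientation, nothing calls these ops directly; they are reached when an object's backing store is pinned. A hedged usage sketch follows (helper names are taken from the v5.13 i915 tree and should be treated as assumptions; the function itself is illustrative, not part of this patch):

	/* Sketch: allocating an LMEM object and pinning its pages reaches
	 * lmem_get_pages() above; put_pages runs later, on shrink or free. */
	static int example_alloc_lmem(struct drm_i915_private *i915)
	{
		struct drm_i915_gem_object *obj;
		int err;

		obj = i915_gem_object_create_lmem(i915, SZ_2M,
						  I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = i915_gem_object_pin_pages_unlocked(obj); /* ops->get_pages() */
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_put(obj);
		return err;
	}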

drivers/gpu/drm/i915/gem/i915_gem_object_types.h (+4 −2)
@@ -235,10 +235,12 @@ struct drm_i915_gem_object {
 		 * Memory region for this object.
 		 */
 		struct intel_memory_region *region;
+
 		/**
-		 * List of memory region blocks allocated for this object.
+		 * Memory manager node allocated for this object.
 		 */
-		struct list_head blocks;
+		void *st_mm_node;
+
 		/**
 		 * Element within memory_region->objects or region->purgeable
 		 * if the object is marked as DONTNEED. Access is protected by
drivers/gpu/drm/i915/gem/i915_gem_pages.c (+2 −1)
@@ -475,7 +475,8 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 
 	might_sleep();
 	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+	if (!i915_gem_object_has_pinned_pages(obj))
+		assert_object_held(obj);
 
 	/* As we iterate forward through the sg, we record each entry in a
 	 * radixtree for quick repeated (backwards) lookups. If we have seen
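The relaxed assertion means a caller that holds the object's dma-resv lock (through the ww context), rather than a page pin, may use the sg-lookup helpers. A hedged sketch, assuming the v5.13 i915 ww-locking helpers and an object whose pages are already populated (the function name is hypothetical):

	/* Sketch: looking up a DMA address under the ww lock instead of a
	 * page pin; this satisfies the assert_object_held() path above. */
	static dma_addr_t example_lookup(struct drm_i915_gem_object *obj)
	{
		struct i915_gem_ww_ctx ww;
		dma_addr_t addr = 0;
		int err;

		i915_gem_ww_ctx_init(&ww, true);
	retry:
		err = i915_gem_object_lock(obj, &ww);
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		if (!err)
			addr = i915_gem_object_get_dma_address(obj, 0);
		i915_gem_ww_ctx_fini(&ww);

		return addr;
	}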