Commit bfd616ff authored by Thomas Zimmermann's avatar Thomas Zimmermann
Browse files

Merge tag 'tags/topic/i915-ttm-2021-06-11' into drm-misc-next

drm-misc and drm-intel pull request for topic/i915-ttm:
- Convert i915 lmem handling to ttm.
- Add a patch to temporarily add a driver_private member to vma_node.
- Use this to allow mixed object mmap handling for i915.
parents 00f4471e cf3e3e86
Loading
Loading
Loading
Loading
+0 −9
Original line number Diff line number Diff line
@@ -1148,15 +1148,6 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

+1 −0
Original line number Diff line number Diff line
@@ -155,6 +155,7 @@ gem-y += \
	gem/i915_gem_stolen.o \
	gem/i915_gem_throttle.o \
	gem/i915_gem_tiling.o \
	gem/i915_gem_ttm.o \
	gem/i915_gem_userptr.o \
	gem/i915_gem_wait.o \
	gem/i915_gemfs.o
+1 −1
Original line number Diff line number Diff line
@@ -11771,7 +11771,7 @@ intel_user_framebuffer_create(struct drm_device *dev,

	/* object is backed with LMEM for discrete */
	i915 = to_i915(obj->base.dev);
	if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
	if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
		/* object is "remote", not in local memory */
		i915_gem_object_put(obj);
		return ERR_PTR(-EREMOTE);
+3 −6
Original line number Diff line number Diff line
@@ -85,13 +85,10 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
		return -E2BIG;

	/*
	 * For now resort to CPU based clearing for device local-memory, in the
	 * near future this will use the blitter engine for accelerated, GPU
	 * based clearing.
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = 0;
	if (mr->type == INTEL_MEMORY_LOCAL)
		flags = I915_BO_ALLOC_CPU_CLEAR;
	flags = I915_BO_ALLOC_USER;

	ret = mr->ops->init_object(mr, obj, size, flags);
	if (ret)
+41 −85
Original line number Diff line number Diff line
@@ -4,74 +4,10 @@
 */

#include "intel_memory_region.h"
#include "intel_region_ttm.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

/*
 * Release the backing store of an lmem object: hand the region node back
 * to the TTM-based region manager and dispose of the sg_table that was
 * built from it in lmem_get_pages().
 */
static void lmem_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	/* Backing store is going away; there is nothing left to flush. */
	obj->mm.dirty = false;

	intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);

	/* The table itself was kmalloc'ed by the region code. */
	sg_free_table(pages);
	kfree(pages);
}

static int lmem_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int flags;
	struct sg_table *pages;

	flags = I915_ALLOC_MIN_PAGE_SIZE;
	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
		flags |= I915_ALLOC_CONTIGUOUS;

	obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
							 obj->base.size,
							 flags);
	if (IS_ERR(obj->mm.st_mm_node))
		return PTR_ERR(obj->mm.st_mm_node);

	/* Range manager is always contigous */
	if (obj->mm.region->is_range_manager)
		obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
	pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
	if (IS_ERR(pages)) {
		intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
		return PTR_ERR(pages);
	}

	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR) {
		void __iomem *vaddr =
			i915_gem_object_lmem_io_map(obj, 0, obj->base.size);

		if (!vaddr) {
			struct sg_table *pages =
				__i915_gem_object_unset_pages(obj);

			if (!IS_ERR_OR_NULL(pages))
				lmem_put_pages(obj, pages);
		}

		memset_io(vaddr, 0, obj->base.size);
		io_mapping_unmap(vaddr);
	}

	return 0;
}

/*
 * GEM object ops for device local-memory (lmem) backed objects.
 * I915_GEM_OBJECT_HAS_IOMEM flags the backing store as iomem, so it must
 * be accessed via io mappings rather than as ordinary struct pages.
 */
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
	.name = "i915_gem_object_lmem",
	.flags = I915_GEM_OBJECT_HAS_IOMEM,

	.get_pages = lmem_get_pages,
	.put_pages = lmem_put_pages,
	.release = i915_gem_object_release_memory_region,
};

void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
@@ -87,10 +23,50 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}

/**
 * i915_gem_object_validates_to_lmem - Whether the object is resident in
 * lmem when pages are present.
 * @obj: The object to check.
 *
 * Migratable objects residency may change from under us if the object is
 * not pinned or locked. This function is intended to be used to check whether
 * the object can only reside in lmem when pages are present.
 *
 * Return: Whether the object is always resident in lmem when pages are
 * present.
 */
bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

	return !i915_gem_object_migratable(obj) &&
		mr && (mr->type == INTEL_MEMORY_LOCAL ||
		       mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}

/**
 * i915_gem_object_is_lmem - Whether the object is resident in
 * lmem
 * @obj: The object to check.
 *
 * Even if an object is allowed to migrate and change memory region,
 * this function checks whether it will always be present in lmem when
 * valid *or* if that's not the case, whether it's currently resident in lmem.
 * For migratable and evictable objects, the latter only makes sense when
 * the object is locked.
 *
 * Return: Whether the object migratable but resident in lmem, or not
 * migratable and will be present in lmem when valid.
 */
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	/*
	 * Fix: drop the stale duplicate declaration of @mr that was left
	 * alongside this one; keep the READ_ONCE() form, since the region
	 * pointer of an unlocked migratable object may change under us.
	 */
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	/*
	 * Residency of an object that is both migratable and evictable is
	 * only stable while its dma-resv lock is held.
	 */
	if (i915_gem_object_migratable(obj) &&
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
@@ -103,23 +79,3 @@ i915_gem_object_create_lmem(struct drm_i915_private *i915,
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
					     size, flags);
}

/*
 * Initialize @obj as an lmem-backed GEM object of @size bytes placed in
 * memory region @mem. Always returns 0.
 */
int __i915_gem_lmem_object_init(struct intel_memory_region *mem,
				struct drm_i915_gem_object *obj,
				resource_size_t size,
				unsigned int flags)
{
	static struct lock_class_key lmem_lock_class;
	struct drm_i915_private *dev_priv = mem->i915;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lmem_lock_class,
			     flags);

	/* lmem is mapped write-combined; there is no CPU-cached domain. */
	obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);

	i915_gem_object_init_memory_region(obj, mem);

	return 0;
}
Loading