Commit 7e00897b authored by Maarten Lankhorst
Browse files

drm/i915: Add object locking to i915_gem_evict_for_node and i915_gem_evict_something, v2.



Because we will start to require the obj->resv lock for unbinding,
ensure these vma eviction utility functions also take the lock.

This requires some function signature changes, to ensure that the
ww context is passed around, but is mostly straightforward.

Previously this was split up into several patches, but reworking
should allow for easier bisection.

Changes since v1:
- Handle evicting dead objects better.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-4-maarten.lankhorst@linux.intel.com
parent 6945c53b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -506,7 +506,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
+1 −1
Original line number Diff line number Diff line
@@ -1382,7 +1382,7 @@ static int evict_vma(void *data)
	complete(&arg->completion);

	mutex_lock(&vm->mutex);
	err = i915_gem_evict_for_node(vm, &evict, 0);
	err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
	mutex_unlock(&vm->mutex);

	return err;
+1 −1
Original line number Diff line number Diff line
@@ -63,7 +63,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)

	mutex_lock(&gt->ggtt->vm.mutex);
	mmio_hw_access_pre(gt);
	ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
	ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
				  size, I915_GTT_PAGE_SIZE,
				  I915_COLOR_UNEVICTABLE,
				  start, end, flags);
+2 −0
Original line number Diff line number Diff line
@@ -1735,11 +1735,13 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  struct i915_gem_ww_ctx *ww,
					  u64 min_size, u64 alignment,
					  unsigned long color,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct i915_gem_ww_ctx *ww,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm,
+62 −7
Original line number Diff line number Diff line
@@ -37,6 +37,11 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

/*
 * A vma is considered "dying" when the refcount of its backing GEM object
 * has already dropped to zero (the object is being destroyed).
 */
static bool dying_vma(struct i915_vma *vma)
{
	return !kref_read(&vma->obj->base.refcount);
}

static int ggtt_flush(struct intel_gt *gt)
{
	/*
@@ -49,8 +54,37 @@ static int ggtt_flush(struct intel_gt *gt)
	return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

/*
 * Acquire a reference and the object lock on vma->obj in preparation for
 * eviction.  Returns true on success.  For a live object, failure to
 * trylock (e.g. the lock is held by another ww context) returns false and
 * leaves the vma untouched.  A dying object (refcount already zero) is
 * "grabbed" trivially: its pin bits are stripped so it can be unbound.
 * Each successful grab must be balanced by ungrab_vma().
 */
static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	/*
	 * We add the extra refcount so the object doesn't drop to zero until
	 * after ungrab_vma(), this way trylock is always paired with unlock.
	 */
	if (i915_gem_object_get_rcu(vma->obj)) {
		if (!i915_gem_object_trylock(vma->obj, ww)) {
			i915_gem_object_put(vma->obj);
			return false;
		}
	} else {
		/* Dead objects don't need pins */
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	}

	return true;
}

/*
 * Undo grab_vma(): drop the object lock and the extra reference taken for
 * a live object.  A dying vma took neither, so nothing to release.
 */
static void ungrab_vma(struct i915_vma *vma)
{
	if (dying_vma(vma))
		return;

	i915_gem_object_unlock(vma->obj);
	i915_gem_object_put(vma->obj);
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_gem_ww_ctx *ww,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
@@ -58,6 +92,9 @@ mark_free(struct drm_mm_scan *scan,
	if (i915_vma_is_pinned(vma))
		return false;

	if (!grab_vma(vma, ww))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}
@@ -76,6 +113,7 @@ static bool defer_evict(struct i915_vma *vma)
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @color: color for the desired space
@@ -98,6 +136,7 @@ static bool defer_evict(struct i915_vma *vma)
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 struct i915_gem_ww_ctx *ww,
			 u64 min_size, u64 alignment,
			 unsigned long color,
			 u64 start, u64 end,
@@ -170,7 +209,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
			continue;
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
		if (mark_free(&scan, ww, vma, flags, &eviction_list))
			goto found;
	}

@@ -178,6 +217,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
		ungrab_vma(vma);
	}

	/*
@@ -222,10 +262,12 @@ i915_gem_evict_something(struct i915_address_space *vm,
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
		if (drm_mm_scan_remove_block(&scan, &vma->node)) {
			__i915_vma_pin(vma);
		else
		} else {
			list_del(&vma->evict_link);
			ungrab_vma(vma);
		}
	}

	/* Unbinding will emit any required flushes */
@@ -234,16 +276,20 @@ i915_gem_evict_something(struct i915_address_space *vm,
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);
		ungrab_vma(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);

		/* If we find any non-objects (!vma), we cannot evict them */
		if (vma->node.color != I915_COLOR_UNEVICTABLE)
		if (vma->node.color != I915_COLOR_UNEVICTABLE &&
		    grab_vma(vma, ww)) {
			ret = __i915_vma_unbind(vma);
		else
			ret = -ENOSPC; /* XXX search failed, try again? */
			ungrab_vma(vma);
		} else {
			ret = -ENOSPC;
		}
	}

	return ret;
@@ -252,6 +298,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @ww: An optional struct i915_gem_ww_ctx.
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
@@ -261,6 +308,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct i915_gem_ww_ctx *ww,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
@@ -333,6 +381,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
			break;
		}

		if (!grab_vma(vma, ww)) {
			ret = -ENOSPC;
			break;
		}

		/*
		 * Never show fear in the face of dragons!
		 *
@@ -350,6 +403,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = __i915_vma_unbind(vma);

		ungrab_vma(vma);
	}

	return ret;
@@ -401,7 +456,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
			 * the resv is shared among multiple objects, we still
			 * need the object ref.
			 */
			if (!kref_read(&vma->obj->base.refcount) ||
			if (dying_vma(vma) ||
			    (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
				__i915_vma_pin(vma);
				list_add(&vma->evict_link, &locked_eviction_list);
Loading