Commit 0f341974 authored by Maarten Lankhorst
Browse files

drm/i915: Add i915_vma_unbind_unlocked, and take obj lock for i915_vma_unbind, v2.



We want to remove more members of i915_vma, which requires the locking to
be held more often.

Start requiring gem object lock for i915_vma_unbind, as it's one of the
callers that may unpin pages.

Some special care is needed when evicting, because the last reference to
the object may be held by the VMA, so after __i915_vma_unbind, vma may be
garbage, and we need to cache vma->obj before unlocking.

Changes since v1:
- Make trylock failing a WARN. (Matt)
- Remove double i915_vma_wait_for_bind() (Matt)
- Move atomic_set to right before mutex_unlock(), to make it more clear
  they belong together. (Matt)

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220114132320.109030-5-maarten.lankhorst@linux.intel.com
parent 7e00897b
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
		goto err;

	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
		ret = i915_vma_unbind(vma);
		ret = i915_vma_unbind_unlocked(vma);
		if (ret) {
			vma = ERR_PTR(ret);
			goto err;
+1 −1
Original line number Diff line number Diff line
@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
		 * pages.
		 */
		for (offset = 4096; offset < page_size; offset += 4096) {
			err = i915_vma_unbind(vma);
			err = i915_vma_unbind_unlocked(vma);
			if (err)
				goto out_unpin;

+1 −1
Original line number Diff line number Diff line
@@ -318,7 +318,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
	int err;

	if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
		err = i915_vma_unbind(vma);
		err = i915_vma_unbind_unlocked(vma);
		if (err)
			return err;
	}
+6 −0
Original line number Diff line number Diff line
@@ -166,7 +166,9 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
	kunmap(p);

out:
	i915_gem_object_lock(obj, NULL);
	__i915_vma_put(vma);
	i915_gem_object_unlock(obj);
	return err;
}

@@ -261,7 +263,9 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
		if (err)
			return err;

		i915_gem_object_lock(obj, NULL);
		__i915_vma_put(vma);
		i915_gem_object_unlock(obj);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
@@ -1352,7 +1356,9 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	i915_gem_object_lock(obj, NULL);
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
+42 −3
Original line number Diff line number Diff line
@@ -129,22 +129,51 @@ void i915_ggtt_suspend_vm(struct i915_address_space *vm)

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

retry:
	i915_gem_drain_freed_objects(vm->i915);

	mutex_lock(&vm->mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&vm->open, 0);

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
		if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
			continue;

		/* unlikely to race when GPU is idle, so no worry about slowpath.. */
		if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
			/*
			 * No dead objects should appear here, GPU should be
			 * completely idle, and userspace suspended
			 */
			i915_gem_object_get(obj);

			atomic_set(&vm->open, open);
			mutex_unlock(&vm->mutex);

			i915_gem_object_lock(obj, NULL);
			open = i915_vma_unbind(vma);
			i915_gem_object_unlock(obj);

			GEM_WARN_ON(open);

			i915_gem_object_put(obj);
			goto retry;
		}

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			i915_vma_wait_for_bind(vma);

			__i915_vma_evict(vma, false);
			drm_mm_remove_node(&vma->node);
		}

		i915_gem_object_unlock(obj);
	}

	vm->clear_range(vm, 0, vm->total);
@@ -746,11 +775,21 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
	atomic_set(&ggtt->vm.open, 0);

	flush_workqueue(ggtt->vm.i915->wq);
	i915_gem_drain_freed_objects(ggtt->vm.i915);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		bool trylock;

		trylock = i915_gem_object_trylock(obj, NULL);
		WARN_ON(!trylock);

		WARN_ON(__i915_vma_unbind(vma));
		if (trylock)
			i915_gem_object_unlock(obj);
	}

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
Loading