Commit 1cacd689 authored by Anshuman Gupta

drm/i915/dgfx: Grab wakeref at i915_ttm_unmap_virtual



We already grab the rpm wakeref on the object destruction path, but
it is also required when the object moves: when
i915_gem_object_release_mmap_offset() gets called by
i915_ttm_move_notify(), it releases the mmap offset without holding
the wakeref. Avoid that by grabbing the wakeref in
i915_ttm_unmap_virtual() as well.
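
In code, the intended pattern looks roughly like the sketch below; it
reuses the driver's existing helpers, and the wakeref is only taken
when the object is actually mapped through iomem. This is an
illustration of the pattern, not the verbatim driver code — the real
change is the i915_ttm_unmap_virtual() hunk further down:

	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	intel_wakeref_t wakeref = 0;

	/* Only an iomem (lmem) mapping needs the device awake. */
	if (i915_ttm_cpu_maps_iomem(bo->resource))
		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);

	/* Tear down the mmap offset while the wakeref is held. */
	ttm_bo_unmap_virtual(bo);

	if (wakeref)
		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);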

While doing that, also change lmem_userfault_lock from a mutex to a
spinlock, as a spinlock is the more natural primitive for protecting
a list.

Also replace the if (obj->userfault_count) check with
GEM_BUG_ON(!obj->userfault_count), since the count must be non-zero
at that point. Both changes are illustrated in the sketch below.
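
Taken together, the userfault bookkeeping roughly follows this
sketch; rpm is shorthand here for &to_i915(obj->base.dev)->runtime_pm,
and this illustrates the pattern rather than quoting the driver code.
The spinlock only covers the short list manipulation, while
userfault_count itself is serialized by the object lock and the rpm
wakeref:

	/* Fault path: publish the object on the userfault list. */
	obj->userfault_count = 1;
	spin_lock(&rpm->lmem_userfault_lock);
	list_add(&obj->userfault_link, &rpm->lmem_userfault_list);
	spin_unlock(&rpm->lmem_userfault_lock);

	/*
	 * Runtime-suspend path: rpm suspend has exclusive access, so no
	 * lock is needed, and the object must still be on the list.
	 */
	GEM_BUG_ON(!obj->userfault_count);
	list_del(&obj->userfault_link);
	obj->userfault_count = 0;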

v2:
- Removed lmem_userfault_{list,lock} from intel_gt. [Matt Auld]

Fixes: ad74457a ("drm/i915/dgfx: Release mmap on rpm suspend")
Suggested-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221027092242.1476080-3-anshuman.gupta@intel.com
parent e66c8dcf
drivers/gpu/drm/i915/gem/i915_gem_mman.c +7 −12
@@ -557,12 +557,14 @@ void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
 
 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
 
-	if (obj->userfault_count) {
-		/* rpm wakeref provide exclusive access */
-		list_del(&obj->userfault_link);
-		obj->userfault_count = 0;
-	}
+	/*
+	 * We have exclusive access here via runtime suspend. All other callers
+	 * must first grab the rpm wakeref.
+	 */
+	GEM_BUG_ON(!obj->userfault_count);
+	list_del(&obj->userfault_link);
+	obj->userfault_count = 0;
 }
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
@@ -587,13 +589,6 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 		spin_lock(&obj->mmo.lock);
 	}
 	spin_unlock(&obj->mmo.lock);
-
-	if (obj->userfault_count) {
-		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
-		list_del(&obj->userfault_link);
-		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
-		obj->userfault_count = 0;
-	}
 }
 
 static struct i915_mmap_offset *
drivers/gpu/drm/i915/gem/i915_gem_ttm.c +26 −12
@@ -509,18 +509,9 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
 static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
-	intel_wakeref_t wakeref = 0;
 
 	if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
-		/* ttm_bo_release() already has dma_resv_lock */
-		if (i915_ttm_cpu_maps_iomem(bo->resource))
-			wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
-
 		__i915_gem_object_pages_fini(obj);
-
-		if (wakeref)
-			intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
-
 		i915_ttm_free_cached_io_rsgt(obj);
 	}
 }
@@ -1098,12 +1089,15 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
 	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 		goto out_rpm;
 
-	/* ttm_bo_vm_reserve() already has dma_resv_lock */
+	/*
+	 * ttm_bo_vm_reserve() already has dma_resv_lock.
+	 * userfault_count is protected by dma_resv lock and rpm wakeref.
+	 */
 	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
 		obj->userfault_count = 1;
-		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+		spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
-		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+		spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
 	}
 
 	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
@@ -1169,7 +1163,27 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 
 static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 {
+	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+	intel_wakeref_t wakeref = 0;
+
+	assert_object_held_shared(obj);
+
+	if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+		wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
+
+		/* userfault_count is protected by obj lock and rpm wakeref. */
+		if (obj->userfault_count) {
+			spin_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+			list_del(&obj->userfault_link);
+			spin_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
+			obj->userfault_count = 0;
+		}
+	}
+
 	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+
+	if (wakeref)
+		intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
drivers/gpu/drm/i915/intel_runtime_pm.c +1 −1
@@ -648,6 +648,6 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
 
 	init_intel_runtime_pm_wakeref(rpm);
 	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
-	mutex_init(&rpm->lmem_userfault_lock);
+	spin_lock_init(&rpm->lmem_userfault_lock);
 	intel_wakeref_auto_init(&rpm->userfault_wakeref, rpm);
 }
drivers/gpu/drm/i915/intel_runtime_pm.h +1 −1
@@ -64,7 +64,7 @@ struct intel_runtime_pm {
 	 *  but instead has exclusive access by virtue of all other accesses requiring
 	 *  holding the runtime pm wakeref.
 	 */
-	struct mutex lmem_userfault_lock;
+	spinlock_t lmem_userfault_lock;
 
 	/*
 	 *  Keep list of userfaulted gem obj, which require to release their