Commit a0696856 authored by Nirmoy Das's avatar Nirmoy Das Committed by Ramalingam C
Browse files

drm/i915: Fix a lockdep warning at error capture

For some platforms we use the stop_machine() version of
gen8_ggtt_insert_page/gen8_ggtt_insert_entries to avoid a
concurrent GGTT access bug, but this causes a circular locking
dependency warning:

  Possible unsafe locking scenario:
        CPU0                    CPU1
        ----                    ----
   lock(&ggtt->error_mutex);
                                lock(dma_fence_map);
                                lock(&ggtt->error_mutex);
   lock(cpu_hotplug_lock);

Fix this by calling gen8_ggtt_insert_page/gen8_ggtt_insert_entries
directly at error capture, which is safe from concurrent GGTT access
because the reset path makes sure of that.

v2: Fix rebase conflict and added a comment.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5595


Reviewed-by: default avatarGwan-gyeong Mun <gwan-gyeong.mun@intel.com>
Suggested-by: default avatarChris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: default avatarNirmoy Das <nirmoy.das@intel.com>
Reviewed-by: default avatarAndrzej Hajda <andrzej.hajda@intel.com>
Signed-off-by: default avatarRamalingam C <ramalingam.c@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220624110821.29190-1-nirmoy.das@intel.com
parent 58eaa6b3
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -960,6 +960,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;

		/*
		 * Calling stop_machine() version of GGTT update function
		 * at error capture/reset path will raise lockdep warning.
		 * Allow calling gen8_ggtt_insert_* directly at reset path
		 * which is safe from parallel GGTT updates.
		 */
		ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
		ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;

		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}
+9 −0
Original line number Diff line number Diff line
@@ -306,6 +306,15 @@ struct i915_address_space {
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*raw_insert_page)(struct i915_address_space *vm,
				dma_addr_t addr,
				u64 offset,
				enum i915_cache_level cache_level,
				u32 flags);
	void (*raw_insert_entries)(struct i915_address_space *vm,
				   struct i915_vma_resource *vma_res,
				   enum i915_cache_level cache_level,
				   u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	void (*foreach)(struct i915_address_space *vm,
+4 −1
Original line number Diff line number Diff line
@@ -493,6 +493,9 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	if (ggtt->vm.raw_insert_entries)
		ggtt->vm.raw_insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
	else
		ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}

+6 −2
Original line number Diff line number Diff line
@@ -1104,6 +1104,10 @@ i915_vma_coredump_create(const struct intel_gt *gt,

		for_each_sgt_daddr(dma, iter, vma_res->bi.pages) {
			mutex_lock(&ggtt->error_mutex);
			if (ggtt->vm.raw_insert_page)
				ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
							 I915_CACHE_NONE, 0);
			else
				ggtt->vm.insert_page(&ggtt->vm, dma, slot,
						     I915_CACHE_NONE, 0);
			mb();