Commit 38d5ec43 authored by Matthew Brost, committed by John Harrison
Browse files

drm/i915/guc: Ensure request ordering via completion fences



If two requests are on the same ring, they are explicitly ordered by the
HW. So, a submission fence is sufficient to ensure ordering when using
the new GuC submission interface. Conversely, if two requests share a
timeline and are on the same physical engine but in different contexts,
this doesn't ensure ordering on the new GuC submission interface. So, a
completion fence needs to be used to ensure ordering.

v2:
 (Daniele)
  - Don't delete spin lock
v3:
 (Daniele)
  - Delete forward dec

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-13-matthew.brost@intel.com
parent e6cb8dc9
Loading
Loading
Loading
Loading
Diff (+8 −2):
@@ -432,6 +432,7 @@ void i915_request_retire_upto(struct i915_request *rq)

 	do {
 		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
+		GEM_BUG_ON(!i915_request_completed(tmp));
 	} while (i915_request_retire(tmp) && tmp != rq);
 }

@@ -1463,7 +1464,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
 			return ret;
 	}

-	if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+	if (!intel_engine_uses_guc(to->engine) &&
+	    is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
 		ret = await_request_submit(to, from);
 	else
 		ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
@@ -1622,6 +1624,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 	prev = to_request(__i915_active_fence_set(&timeline->last_request,
 						  &rq->fence));
 	if (prev && !__i915_request_is_complete(prev)) {
+		bool uses_guc = intel_engine_uses_guc(rq->engine);
+
 		/*
 		 * The requests are supposed to be kept in order. However,
 		 * we need to be wary in case the timeline->last_request
@@ -1632,7 +1636,9 @@ __i915_request_add_to_timeline(struct i915_request *rq)
 			   i915_seqno_passed(prev->fence.seqno,
 					     rq->fence.seqno));

-		if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
+		if ((!uses_guc &&
+		     is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
+		    (uses_guc && prev->context == rq->context))
 			i915_sw_fence_await_sw_fence(&rq->submit,
 						     &prev->submit,
 						     &rq->submitq);