Commit e0717063 authored by Matthew Brost, committed by John Harrison
Browse files

drm/i915/guc: Defer context unpin until scheduling is disabled



With GuC scheduling, it isn't safe to unpin a context while scheduling
is enabled for that context as the GuC may touch some of the pinned
state (e.g. LRC). To ensure scheduling isn't enabled when an unpin is
done, a callback is added to intel_context_unpin when pin count == 1
to disable scheduling for that context. When the response CTB is
received it is safe to do the final unpin.

Future patches may add a heuristic / delay to schedule the disable
callback to avoid thrashing on schedule enable / disable.

v2:
 (John H)
  - s/drm_dbg/drm_err
 (Daniel)
  - Clean up sched state function

Cc: John Harrison <john.c.harrison@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210721215101.139794-9-matthew.brost@intel.com
parent b208f2d5
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -306,9 +306,9 @@ int __intel_context_do_pin(struct intel_context *ce)
	return err;
}

void intel_context_unpin(struct intel_context *ce)
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
	if (!atomic_dec_and_test(&ce->pin_count))
	if (!atomic_sub_and_test(sub, &ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
+26 −1
Original line number Diff line number Diff line
@@ -113,7 +113,32 @@ static inline void __intel_context_pin(struct intel_context *ce)
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);
void __intel_context_do_unpin(struct intel_context *ce, int sub);

/*
 * Completion path for the async scheduling-disable operation: drops the
 * caller's original pin reference AND the extra reference that
 * intel_context_unpin() took when it handed the pin off to the disable
 * operation — hence sub == 2, matching the cmpxchg(pin_count, 1, 2) in
 * intel_context_unpin().
 */
static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
{
	__intel_context_do_unpin(ce, 2);
}

/*
 * Drop a pin reference on @ce. When the backend supplies a sched_disable
 * hook (GuC submission), the *final* reference must not be released here:
 * the GuC may still touch pinned state (e.g. the LRC) while scheduling is
 * enabled, so the last reference is handed to an async scheduling-disable
 * operation and released later via intel_context_sched_disable_unpin().
 */
static inline void intel_context_unpin(struct intel_context *ce)
{
	if (!ce->ops->sched_disable) {
		/* No hook: plain decrement; unpins when the count hits zero. */
		__intel_context_do_unpin(ce, 1);
	} else {
		/*
		 * Move ownership of this pin to the scheduling disable which is
		 * an async operation. When that operation completes the above
		 * intel_context_sched_disable_unpin is called potentially
		 * unpinning the context.
		 */
		while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
			/*
			 * add_unless failed, so pin_count was observed == 1
			 * (last reference). Bump 1 -> 2 so the count cannot
			 * reach zero here, then kick off the async disable; the
			 * extra reference is dropped when the completion calls
			 * intel_context_sched_disable_unpin() (sub == 2). If the
			 * cmpxchg loses a race (count changed), retry the loop.
			 */
			if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
				ce->ops->sched_disable(ce);
				break;
			}
		}
	}
}

void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);
+2 −0
Original line number Diff line number Diff line
@@ -43,6 +43,8 @@ struct intel_context_ops {
	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);
};
+2 −0
Original line number Diff line number Diff line
@@ -248,6 +248,8 @@ int intel_guc_reset_engine(struct intel_guc *guc,

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

+3 −0
Original line number Diff line number Diff line
@@ -932,6 +932,9 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
		ret = intel_guc_deregister_done_process_msg(guc, payload,
							    len);
		break;
	case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
		ret = intel_guc_sched_done_process_msg(guc, payload, len);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
Loading