Commit 77dcbffb authored by John Harrison
Browse files

drm/i915/guc: Rename desc_idx to ctx_id



The LRC descriptor pool is going away. So, stop naming context ids as
descriptor pool indices.

While at it, add a bunch of missing line feeds to some error messages.

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220302003357.4188363-7-John.C.Harrison@Intel.com
parent 8e2e9c43
Loading
Loading
Loading
Loading
+26 −26
Original line number Diff line number Diff line
@@ -2232,7 +2232,7 @@ static void prepare_context_registration_info(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct intel_guc *guc = &engine->gt->uc.guc;
	u32 desc_idx = ce->guc_id.id;
	u32 ctx_id = ce->guc_id.id;
	struct guc_lrc_desc *desc;
	struct intel_context *child;

@@ -2245,7 +2245,7 @@ static void prepare_context_registration_info(struct intel_context *ce)
	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
		   i915_gem_object_is_lmem(ce->ring->vma->obj));

	desc = __get_lrc_desc(guc, desc_idx);
	desc = __get_lrc_desc(guc, ctx_id);
	desc->engine_class = engine_class_to_guc_class(engine->class);
	desc->engine_submit_mask = engine->logical_mask;
	desc->hw_context_desc = ce->lrc.lrca;
@@ -2297,16 +2297,16 @@ static int try_context_registration(struct intel_context *ce, bool loop)
	struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
	struct intel_guc *guc = &engine->gt->uc.guc;
	intel_wakeref_t wakeref;
	u32 desc_idx = ce->guc_id.id;
	u32 ctx_id = ce->guc_id.id;
	bool context_registered;
	int ret = 0;

	GEM_BUG_ON(!sched_state_is_init(ce));

	context_registered = ctx_id_mapped(guc, desc_idx);
	context_registered = ctx_id_mapped(guc, ctx_id);

	clr_ctx_id_mapping(guc, desc_idx);
	set_ctx_id_mapping(guc, desc_idx, ce);
	clr_ctx_id_mapping(guc, ctx_id);
	set_ctx_id_mapping(guc, ctx_id, ce);

	/*
	 * The context_lookup xarray is used to determine if the hardware
@@ -2332,7 +2332,7 @@ static int try_context_registration(struct intel_context *ce, bool loop)
		}
		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
		if (unlikely(disabled)) {
			clr_ctx_id_mapping(guc, desc_idx);
			clr_ctx_id_mapping(guc, ctx_id);
			return 0;	/* Will get registered later */
		}

@@ -2348,9 +2348,9 @@ static int try_context_registration(struct intel_context *ce, bool loop)
		with_intel_runtime_pm(runtime_pm, wakeref)
			ret = register_context(ce, loop);
		if (unlikely(ret == -EBUSY)) {
			clr_ctx_id_mapping(guc, desc_idx);
			clr_ctx_id_mapping(guc, ctx_id);
		} else if (unlikely(ret == -ENODEV)) {
			clr_ctx_id_mapping(guc, desc_idx);
			clr_ctx_id_mapping(guc, ctx_id);
			ret = 0;	/* Will get registered later */
		}
	}
@@ -3864,26 +3864,26 @@ void intel_guc_submission_init_early(struct intel_guc *guc)
}

static inline struct intel_context *
g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
{
	struct intel_context *ce;

	if (unlikely(desc_idx >= GUC_MAX_CONTEXT_ID)) {
	if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
		drm_err(&guc_to_gt(guc)->i915->drm,
			"Invalid desc_idx %u", desc_idx);
			"Invalid ctx_id %u\n", ctx_id);
		return NULL;
	}

	ce = __get_context(guc, desc_idx);
	ce = __get_context(guc, ctx_id);
	if (unlikely(!ce)) {
		drm_err(&guc_to_gt(guc)->i915->drm,
			"Context is NULL, desc_idx %u", desc_idx);
			"Context is NULL, ctx_id %u\n", ctx_id);
		return NULL;
	}

	if (unlikely(intel_context_is_child(ce))) {
		drm_err(&guc_to_gt(guc)->i915->drm,
			"Context is child, desc_idx %u", desc_idx);
			"Context is child, ctx_id %u\n", ctx_id);
		return NULL;
	}

@@ -3895,14 +3895,14 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  u32 len)
{
	struct intel_context *ce;
	u32 desc_idx = msg[0];
	u32 ctx_id = msg[0];

	if (unlikely(len < 1)) {
		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
		return -EPROTO;
	}

	ce = g2h_context_lookup(guc, desc_idx);
	ce = g2h_context_lookup(guc, ctx_id);
	if (unlikely(!ce))
		return -EPROTO;

@@ -3946,14 +3946,14 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
{
	struct intel_context *ce;
	unsigned long flags;
	u32 desc_idx = msg[0];
	u32 ctx_id = msg[0];

	if (unlikely(len < 2)) {
		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
		return -EPROTO;
	}

	ce = g2h_context_lookup(guc, desc_idx);
	ce = g2h_context_lookup(guc, ctx_id);
	if (unlikely(!ce))
		return -EPROTO;

@@ -3961,8 +3961,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
		     (!context_pending_enable(ce) &&
		     !context_pending_disable(ce)))) {
		drm_err(&guc_to_gt(guc)->i915->drm,
			"Bad context sched_state 0x%x, desc_idx %u",
			ce->guc_state.sched_state, desc_idx);
			"Bad context sched_state 0x%x, ctx_id %u\n",
			ce->guc_state.sched_state, ctx_id);
		return -EPROTO;
	}

@@ -4060,14 +4060,14 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
{
	struct intel_context *ce;
	unsigned long flags;
	int desc_idx;
	int ctx_id;

	if (unlikely(len != 1)) {
		drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
		return -EPROTO;
	}

	desc_idx = msg[0];
	ctx_id = msg[0];

	/*
	 * The context lookup uses the xarray but lookups only require an RCU lock
@@ -4076,7 +4076,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
	 * asynchronously until the reset is done.
	 */
	xa_lock_irqsave(&guc->context_lookup, flags);
	ce = g2h_context_lookup(guc, desc_idx);
	ce = g2h_context_lookup(guc, ctx_id);
	if (ce)
		intel_context_get(ce);
	xa_unlock_irqrestore(&guc->context_lookup, flags);