Commit 349a2bc5 authored by Matthew Brost, committed by Matt Roper

drm/i915: Move active tracking to i915_sched_engine



Move active request tracking and its lock to i915_sched_engine. This
lock is also the submission lock, so i915_sched_engine is the correct
place for it.
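
The struct this state moves into is i915_sched_engine. A minimal sketch
of the fields the diffs below touch, assuming the layout added earlier
in this series in i915_scheduler_types.h (the real struct carries more
state than shown here):

/*
 * Hypothetical abbreviation of struct i915_sched_engine; only the
 * fields referenced by this patch are shown.
 */
struct i915_sched_engine {
	struct kref ref;

	/*
	 * Protects requests, hold and the priority queue. It doubles as
	 * the submission lock, which is why it lives here rather than in
	 * struct intel_engine_cs.
	 */
	spinlock_t lock;

	struct list_head requests;	/* requests in execution order */
	struct list_head hold;		/* ready requests, but on hold */

	struct rb_root_cached queue;	/* priority queue of pending requests */
	int queue_priority_hint;
};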

v3:
 (Jason Ekstrand)
  Add kernel doc
v6:
  Rebase

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-5-matthew.brost@intel.com
parent c4fd7d8c
drivers/gpu/drm/i915/gt/intel_engine.h  +0 −2

@@ -269,8 +269,6 @@ intel_engine_create_pinned_context(struct intel_engine_cs *engine,
 
 void intel_engine_destroy_pinned_context(struct intel_context *ce);
 
-void intel_engine_init_active(struct intel_engine_cs *engine,
-			      unsigned int subclass);
 #define ENGINE_PHYSICAL	0
 #define ENGINE_MOCK	1
 #define ENGINE_VIRTUAL	2

drivers/gpu/drm/i915/gt/intel_engine_cs.c  +11 −32

@@ -721,7 +721,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 	if (err)
 		goto err_cmd_parser;
 
-	intel_engine_init_active(engine, ENGINE_PHYSICAL);
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
@@ -780,11 +779,11 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	frame->rq.ring = &frame->ring;
 
 	mutex_lock(&ce->timeline->mutex);
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&engine->sched_engine->lock);
 
 	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&engine->sched_engine->lock);
 	mutex_unlock(&ce->timeline->mutex);
 
 	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
@@ -793,28 +792,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 	return dw;
 }
 
-void
-intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
-{
-	INIT_LIST_HEAD(&engine->active.requests);
-	INIT_LIST_HEAD(&engine->active.hold);
-
-	spin_lock_init(&engine->active.lock);
-	lockdep_set_subclass(&engine->active.lock, subclass);
-
-	/*
-	 * Due to an interesting quirk in lockdep's internal debug tracking,
-	 * after setting a subclass we must ensure the lock is used. Otherwise,
-	 * nr_unused_locks is incremented once too often.
-	 */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	local_irq_disable();
-	lock_map_acquire(&engine->active.lock.dep_map);
-	lock_map_release(&engine->active.lock.dep_map);
-	local_irq_enable();
-#endif
-}
-
 struct intel_context *
 intel_engine_create_pinned_context(struct intel_engine_cs *engine,
 				   struct i915_address_space *vm,
@@ -969,7 +946,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-	GEM_BUG_ON(!list_empty(&engine->active.requests));
+	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
 	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
 
 	i915_sched_engine_put(engine->sched_engine);
@@ -1325,7 +1302,7 @@ static struct intel_timeline *get_timeline(struct i915_request *rq)
 	struct intel_timeline *tl;
 
 	/*
-	 * Even though we are holding the engine->active.lock here, there
+	 * Even though we are holding the engine->sched_engine->lock here, there
 	 * is no control over the submission queue per-se and we are
 	 * inspecting the active state at a random point in time, with an
 	 * unknown queue. Play safe and make sure the timeline remains valid.
@@ -1672,7 +1649,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
 	drm_printf(m, "\tRequests:\n");
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 	rq = intel_engine_find_active_request(engine);
 	if (rq) {
 		struct intel_timeline *tl = get_timeline(rq);
@@ -1703,8 +1680,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
 		}
 	}
-	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	drm_printf(m, "\tOn hold?: %lu\n",
+		   list_count(&engine->sched_engine->hold));
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 
 	drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
 	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
@@ -1784,7 +1762,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	 * At all other times, we must assume the GPU is still running, but
 	 * we only care about the snapshot of this moment.
 	 */
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&engine->sched_engine->lock);
 
 	rcu_read_lock();
 	request = execlists_active(&engine->execlists);
@@ -1802,7 +1780,8 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 	if (active)
 		return active;
 
-	list_for_each_entry(request, &engine->active.requests, sched.link) {
+	list_for_each_entry(request, &engine->sched_engine->requests,
+			    sched.link) {
 		if (__i915_request_is_complete(request))
 			continue;
 

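Note that intel_engine_init_active(), deleted from intel_engine_cs.c
above, has no replacement in this file. Its list and lock initialization,
including the lockdep-subclass workaround, presumably moves into
i915_sched_engine_create() (called with ENGINE_VIRTUAL in the execlists
diff below). A sketch of what that constructor would need to carry over,
assuming it mirrors the deleted code:

struct i915_sched_engine *
i915_sched_engine_create(unsigned int subclass)
{
	struct i915_sched_engine *sched_engine;

	sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL);
	if (!sched_engine)
		return NULL;

	kref_init(&sched_engine->ref);

	sched_engine->queue = RB_ROOT_CACHED;
	sched_engine->queue_priority_hint = INT_MIN;

	/* Assumed to be moved here from intel_engine_init_active() */
	INIT_LIST_HEAD(&sched_engine->requests);
	INIT_LIST_HEAD(&sched_engine->hold);

	spin_lock_init(&sched_engine->lock);
	lockdep_set_subclass(&sched_engine->lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&sched_engine->lock.dep_map);
	lock_map_release(&sched_engine->lock.dep_map);
	local_irq_enable();
#endif

	return sched_engine;
}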
drivers/gpu/drm/i915/gt/intel_engine_types.h  +0 −6

@@ -304,12 +304,6 @@ struct intel_engine_cs {
 
 	struct intel_sseu sseu;
 
-	struct {
-		spinlock_t lock;
-		struct list_head requests;
-		struct list_head hold; /* ready requests, but on hold */
-	} active;
-
 	struct i915_sched_engine *sched_engine;
 
 	/* keep a request in reserve for a [pm] barrier under oom */

drivers/gpu/drm/i915/gt/intel_execlists_submission.c  +50 −48

@@ -325,7 +325,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
 	 * Check against the first request in ELSP[1], it will, thanks to the
 	 * power of PI, be the highest priority of that context.
 	 */
-	if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+	if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
 	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
 		return true;
 
@@ -367,10 +367,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	struct list_head *pl;
 	int prio = I915_PRIORITY_INVALID;
 
-	lockdep_assert_held(&engine->active.lock);
+	lockdep_assert_held(&engine->sched_engine->lock);
 
 	list_for_each_entry_safe_reverse(rq, rn,
-					 &engine->active.requests,
+					 &engine->sched_engine->requests,
 					 sched.link) {
 		if (__i915_request_is_complete(rq)) {
 			list_del_init(&rq->sched.link);
@@ -534,13 +534,13 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 {
 	struct intel_engine_cs *engine = rq->engine;
 
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&engine->sched_engine->lock);
 
 	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 	WRITE_ONCE(rq->engine, &ve->base);
 	ve->base.submit_request(rq);
 
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
@@ -579,7 +579,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
 	unsigned int ccid;
 
 	/*
-	 * NB process_csb() is not under the engine->active.lock and hence
+	 * NB process_csb() is not under the engine->sched_engine->lock and hence
 	 * schedule_out can race with schedule_in meaning that we should
 	 * refrain from doing non-trivial work here.
 	 */
@@ -1133,7 +1133,8 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
 		return false;
 
 	/* If ELSP[1] is occupied, always check to see if worth slicing */
-	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+	if (!list_is_last_rcu(&rq->sched.link,
+			      &engine->sched_engine->requests)) {
 		ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
 		return true;
 	}
@@ -1266,7 +1267,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	spin_lock(&engine->active.lock);
+	spin_lock(&sched_engine->lock);
 
 	/*
 	 * If the queue is higher priority than the last
@@ -1366,7 +1367,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				spin_unlock(&engine->active.lock);
+				spin_unlock(&sched_engine->lock);
 				return;
 			}
 		}
@@ -1376,7 +1377,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	while ((ve = first_virtual_engine(engine))) {
 		struct i915_request *rq;
 
-		spin_lock(&ve->base.active.lock);
+		spin_lock(&ve->base.sched_engine->lock);
 
 		rq = ve->request;
 		if (unlikely(!virtual_matches(ve, rq, engine)))
@@ -1386,13 +1387,13 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq->context != &ve->context);
 
 		if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
-			spin_unlock(&ve->base.active.lock);
+			spin_unlock(&ve->base.sched_engine->lock);
 			break;
 		}
 
 		if (last && !can_merge_rq(last, rq)) {
-			spin_unlock(&ve->base.active.lock);
-			spin_unlock(&engine->active.lock);
+			spin_unlock(&ve->base.sched_engine->lock);
+			spin_unlock(&engine->sched_engine->lock);
 			return; /* leave this for another sibling */
 		}
 
@@ -1438,7 +1439,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		i915_request_put(rq);
 unlock:
-		spin_unlock(&ve->base.active.lock);
+		spin_unlock(&ve->base.sched_engine->lock);
 
 		/*
 		 * Hmm, we have a bunch of virtual engine requests,
@@ -1554,7 +1555,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 	sched_engine->queue_priority_hint = queue_prio(sched_engine);
 	i915_sched_engine_reset_on_empty(sched_engine);
-	spin_unlock(&engine->active.lock);
+	spin_unlock(&sched_engine->lock);
 
 	/*
 	 * We can skip poking the HW if we ended up with exactly the same set
@@ -1981,7 +1982,8 @@ static void __execlists_hold(struct i915_request *rq)
 			__i915_request_unsubmit(rq);
 
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+		list_move_tail(&rq->sched.link,
+			       &rq->engine->sched_engine->hold);
 		i915_request_set_hold(rq);
 		RQ_TRACE(rq, "on hold\n");
 
@@ -2018,7 +2020,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
 	if (i915_request_on_hold(rq))
 		return false;
 
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&engine->sched_engine->lock);
 
 	if (__i915_request_is_complete(rq)) { /* too late! */
 		rq = NULL;
@@ -2034,10 +2036,10 @@
 	GEM_BUG_ON(i915_request_on_hold(rq));
 	GEM_BUG_ON(rq->engine != engine);
 	__execlists_hold(rq);
-	GEM_BUG_ON(list_empty(&engine->active.hold));
+	GEM_BUG_ON(list_empty(&engine->sched_engine->hold));
 
 unlock:
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&engine->sched_engine->lock);
 	return rq;
 }
 
@@ -2117,7 +2119,7 @@ static void __execlists_unhold(struct i915_request *rq)
 static void execlists_unhold(struct intel_engine_cs *engine,
 			     struct i915_request *rq)
 {
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&engine->sched_engine->lock);
 
 	/*
 	 * Move this request back to the priority queue, and all of its
@@ -2130,7 +2132,7 @@ static void execlists_unhold(struct intel_engine_cs *engine,
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 	}
 
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 struct execlists_capture {
@@ -2260,13 +2262,13 @@ static void execlists_capture(struct intel_engine_cs *engine)
 	if (!cap)
 		return;
 
-	spin_lock_irq(&engine->active.lock);
+	spin_lock_irq(&engine->sched_engine->lock);
 	cap->rq = active_context(engine, active_ccid(engine));
 	if (cap->rq) {
 		cap->rq = active_request(cap->rq->context->timeline, cap->rq);
 		cap->rq = i915_request_get_rcu(cap->rq);
 	}
-	spin_unlock_irq(&engine->active.lock);
+	spin_unlock_irq(&engine->sched_engine->lock);
 	if (!cap->rq)
 		goto err_free;
 
@@ -2470,7 +2472,7 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
 			     const struct i915_request *rq)
 {
 	GEM_BUG_ON(i915_request_on_hold(rq));
-	return !list_empty(&engine->active.hold) && hold_request(rq);
+	return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
 }
 
 static void execlists_submit_request(struct i915_request *request)
@@ -2479,11 +2481,12 @@ static void execlists_submit_request(struct i915_request *request)
 	unsigned long flags;
 
 	/* Will be called from irq-context when using foreign fences. */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
 	if (unlikely(ancestor_on_hold(engine, request))) {
 		RQ_TRACE(request, "ancestor on hold\n");
-		list_add_tail(&request->sched.link, &engine->active.hold);
+		list_add_tail(&request->sched.link,
+			      &engine->sched_engine->hold);
 		i915_request_set_hold(request);
 	} else {
 		queue_request(engine, request);
@@ -2495,7 +2498,7 @@ static void execlists_submit_request(struct i915_request *request)
 			__execlists_kick(&engine->execlists);
 	}
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static int
@@ -2959,9 +2962,9 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	/* Push back any incomplete requests for replay after the reset. */
 	rcu_read_lock();
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 	__unwind_incomplete_requests(engine);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 	rcu_read_unlock();
 }
 
@@ -3001,10 +3004,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	execlists_reset_csb(engine, true);
 
 	rcu_read_lock();
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
 	/* Mark all executing requests as skipped. */
-	list_for_each_entry(rq, &engine->active.requests, sched.link)
+	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
 		i915_request_put(i915_request_mark_eio(rq));
 	intel_engine_signal_breadcrumbs(engine);
 
@@ -3024,7 +3027,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	}
 
 	/* On-hold requests will be flushed to timeline upon their release */
-	list_for_each_entry(rq, &engine->active.hold, sched.link)
+	list_for_each_entry(rq, &sched_engine->hold, sched.link)
 		i915_request_put(i915_request_mark_eio(rq));
 
 	/* Cancel all attached virtual engines */
@@ -3035,7 +3038,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 		rb_erase_cached(rb, &execlists->virtual);
 		RB_CLEAR_NODE(rb);
 
-		spin_lock(&ve->base.active.lock);
+		spin_lock(&ve->base.sched_engine->lock);
 		rq = fetch_and_zero(&ve->request);
 		if (rq) {
 			if (i915_request_mark_eio(rq)) {
@@ -3047,7 +3050,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
 			ve->base.sched_engine->queue_priority_hint = INT_MIN;
 		}
-		spin_unlock(&ve->base.active.lock);
+		spin_unlock(&ve->base.sched_engine->lock);
 	}
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -3058,7 +3061,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
 	execlists->tasklet.callback = nop_submission_tasklet;
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 	rcu_read_unlock();
}
 
@@ -3304,7 +3307,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 	if (unlikely(ve->request)) {
 		struct i915_request *old;
 
-		spin_lock_irq(&ve->base.active.lock);
+		spin_lock_irq(&ve->base.sched_engine->lock);
 
 		old = fetch_and_zero(&ve->request);
 		if (old) {
@@ -3313,7 +3316,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 			i915_request_put(old);
 		}
 
-		spin_unlock_irq(&ve->base.active.lock);
+		spin_unlock_irq(&ve->base.sched_engine->lock);
 	}
 
 	/*
@@ -3333,13 +3336,13 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irq(&sibling->active.lock);
+		spin_lock_irq(&sibling->sched_engine->lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irq(&sibling->active.lock);
+		spin_unlock_irq(&sibling->sched_engine->lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
 	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
@@ -3509,7 +3512,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 		if (!READ_ONCE(ve->request))
 			break; /* already handled by a sibling's tasklet */
 
-		spin_lock_irq(&sibling->active.lock);
+		spin_lock_irq(&sibling->sched_engine->lock);
 
 		if (unlikely(!(mask & sibling->mask))) {
 			if (!RB_EMPTY_NODE(&node->rb)) {
@@ -3562,7 +3565,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 			tasklet_hi_schedule(&sibling->execlists.tasklet);
 
 unlock_engine:
-		spin_unlock_irq(&sibling->active.lock);
+		spin_unlock_irq(&sibling->sched_engine->lock);
 
 		if (intel_context_inflight(&ve->context))
 			break;
@@ -3580,7 +3583,7 @@ static void virtual_submit_request(struct i915_request *rq)
 
 	GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-	spin_lock_irqsave(&ve->base.active.lock, flags);
+	spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
 
 	/* By the time we resubmit a request, it may be completed */
 	if (__i915_request_is_complete(rq)) {
@@ -3603,7 +3606,7 @@ static void virtual_submit_request(struct i915_request *rq)
 	tasklet_hi_schedule(&ve->base.execlists.tasklet);
 
 unlock:
-	spin_unlock_irqrestore(&ve->base.active.lock, flags);
+	spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
 }
 
 static struct ve_bond *
@@ -3687,7 +3690,6 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
 	snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
 
-	intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
 	intel_engine_init_execlists(&ve->base);
 
 	ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
@@ -3860,17 +3862,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 				   unsigned int max)
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_sched_engine *sched_engine = engine->sched_engine;
+	struct i915_sched_engine *sched_engine = engine->sched_engine;
 	struct i915_request *rq, *last;
 	unsigned long flags;
 	unsigned int count;
 	struct rb_node *rb;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&sched_engine->lock, flags);
 
 	last = NULL;
 	count = 0;
-	list_for_each_entry(rq, &engine->active.requests, sched.link) {
+	list_for_each_entry(rq, &sched_engine->requests, sched.link) {
 		if (count++ < max - 1)
 			show_request(m, rq, "\t\t", 0);
 		else
@@ -3933,7 +3935,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

drivers/gpu/drm/i915/gt/intel_ring_submission.c  +6 −6

@@ -339,9 +339,9 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	u32 head;
 
 	rq = NULL;
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 	rcu_read_lock();
-	list_for_each_entry(pos, &engine->active.requests, sched.link) {
+	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
 		if (!__i915_request_is_complete(pos)) {
 			rq = pos;
 			break;
@@ -396,7 +396,7 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
 	}
 	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -408,16 +408,16 @@ static void reset_cancel(struct intel_engine_cs *engine)
 	struct i915_request *request;
 	unsigned long flags;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
+	spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
 	/* Mark all submitted requests as skipped. */
-	list_for_each_entry(request, &engine->active.requests, sched.link)
+	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
 		i915_request_put(i915_request_mark_eio(request));
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)