Commit 2867ff6c authored by Chris Wilson, committed by Daniel Vetter
Browse files

drm/i915: Strip out internal priorities



Since we are not using any internal priority levels, and in the next few
patches will introduce a new index for which the optimisation is not so
clear cut, discard the small table within the priolist.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210120121439.17600-1-chris@chris-wilson.co.uk


Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 06debd6e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -125,7 +125,7 @@ static void heartbeat(struct work_struct *wrk)
			 * low latency and no jitter] the chance to naturally
			 * complete before being preempted.
			 */
			attr.priority = I915_PRIORITY_MASK;
			attr.priority = 0;
			if (rq->sched.attr.priority >= attr.priority)
				attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
			if (rq->sched.attr.priority >= attr.priority)
+5 −17
Original line number Diff line number Diff line
@@ -274,22 +274,13 @@ static int effective_prio(const struct i915_request *rq)

static int queue_prio(const struct intel_engine_execlists *execlists)
{
	struct i915_priolist *p;
	struct rb_node *rb;

	rb = rb_first_cached(&execlists->queue);
	if (!rb)
		return INT_MIN;

	/*
	 * As the priolist[] are inverted, with the highest priority in [0],
	 * we have to flip the index value to become priority.
	 */
	p = to_priolist(rb);
	if (!I915_USER_PRIORITY_SHIFT)
		return p->priority;

	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
	return to_priolist(rb)->priority;
}

static int virtual_prio(const struct intel_engine_execlists *el)
@@ -1452,9 +1443,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
		priolist_for_each_request_consume(rq, rn, p) {
			bool merge = true;

			/*
@@ -2968,9 +2958,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
		priolist_for_each_request_consume(rq, rn, p) {
			i915_request_mark_eio(rq);
			__i915_request_submit(rq);
		}
@@ -3244,7 +3233,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)

static struct list_head *virtual_queue(struct virtual_engine *ve)
{
	return &ve->base.execlists.default_priolist.requests[0];
	return &ve->base.execlists.default_priolist.requests;
}

static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3840,9 +3829,8 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
	count = 0;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
		int i;

		priolist_for_each_request(rq, p, i) {
		priolist_for_each_request(rq, p) {
			if (count++ < max - 1)
				show_request(m, rq, "\t\t", 0);
			else
+0 −1
Original line number Diff line number Diff line
@@ -1081,7 +1081,6 @@ create_rewinder(struct intel_context *ce,

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_MASK;
	err = 0;
err:
	i915_request_get(rq);
+0 −1
Original line number Diff line number Diff line
@@ -733,7 +733,6 @@ create_timestamp(struct intel_context *ce, void *slot, int idx)

	intel_ring_advance(rq, cs);

	rq->sched.attr.priority = I915_PRIORITY_MASK;
	err = 0;
err:
	i915_request_get(rq);
+2 −4
Original line number Diff line number Diff line
@@ -206,9 +206,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
		priolist_for_each_request_consume(rq, rn, p) {
			if (last && rq->context != last->context) {
				if (port == last_port)
					goto done;
@@ -361,9 +360,8 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
	/* Flush the queued requests to the timeline list (for retiring). */
	while ((rb = rb_first_cached(&execlists->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		int i;

		priolist_for_each_request_consume(rq, rn, p, i) {
		priolist_for_each_request_consume(rq, rn, p) {
			list_del_init(&rq->sched.link);
			__i915_request_submit(rq);
			dma_fence_set_error(&rq->fence, -EIO);
Loading