Commit 58b779d1 authored by Rodrigo Vivi

Merge tag 'gvt-next-fixes-2021-02-22' of https://github.com/intel/gvt-linux into drm-intel-next-fixes

gvt-next-fixes-2021-02-22

- Fix to use i915 default state for cmd parser on all engines (Chris)
- Purge dev_priv->gt (Chris)
- Fix gvt object ww locking (Zhi), illustrated by the ww retry sketch below

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210222083402.GD1551@zhen-hp.sh.intel.com
parents 81ce8f04 67f11203
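
The ww-locking fix follows i915's ww transaction idiom: take the object lock inside a ww acquire context, and when acquisition reports -EDEADLK, back off (dropping every lock the transaction holds) and retry from the top. A minimal sketch of the pattern; the i915_gem_* calls are the ones visible in the scheduler.c hunks below, but the gvt_ggtt_pin_locked() wrapper itself is hypothetical:

/*
 * Sketch only: the ww retry idiom applied by the locking fix.
 * The wrapper is illustrative, not code from this commit.
 */
static struct i915_vma *gvt_ggtt_pin_locked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	int ret;

	i915_gem_ww_ctx_init(&ww, false);	/* second arg: not interruptible */
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret) {
		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0, 0);
		ret = IS_ERR(vma) ? PTR_ERR(vma) : 0;
	}
	if (ret == -EDEADLK) {
		/* Lost the lock-ordering race: unlock everything, then retry. */
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);	/* also drops any locks still held */
	return ret ? ERR_PTR(ret) : vma;
}

On success the GGTT pin outlives the transaction, which is why the hunks below can unlock immediately after pinning.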
drivers/gpu/drm/i915/gvt/cmd_parser.c  +20 −73
@@ -41,6 +41,7 @@
 #include "gt/intel_lrc.h"
 #include "gt/intel_ring.h"
 #include "gt/intel_gt_requests.h"
+#include "gt/shmem_utils.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
 #include "trace.h"
@@ -3094,71 +3095,28 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
  */
 void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 {
+	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct drm_i915_private *dev_priv = gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
-	struct i915_request *rq;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_request *requests[I915_NUM_ENGINES] = {};
-	bool is_ctx_pinned[I915_NUM_ENGINES] = {};
-	int ret = 0;
 
 	if (gvt->is_reg_whitelist_updated)
 		return;
 
-	for_each_engine(engine, &dev_priv->gt, id) {
-		ret = intel_context_pin(s->shadow[id]);
-		if (ret) {
-			gvt_vgpu_err("fail to pin shadow ctx\n");
-			goto out;
-		}
-		is_ctx_pinned[id] = true;
-
-		rq = i915_request_create(s->shadow[id]);
-		if (IS_ERR(rq)) {
-			gvt_vgpu_err("fail to alloc default request\n");
-			ret = -EIO;
-			goto out;
-		}
-		requests[id] = i915_request_get(rq);
-		i915_request_add(rq);
-	}
-
-	if (intel_gt_wait_for_idle(&dev_priv->gt,
-				I915_GEM_IDLE_TIMEOUT) == -ETIME) {
-		ret = -EIO;
-		goto out;
-	}
-
 	/* scan init ctx to update cmd accessible list */
-	for_each_engine(engine, &dev_priv->gt, id) {
-		int size = engine->context_size - PAGE_SIZE;
-		void *vaddr;
+	for_each_engine(engine, gvt->gt, id) {
 		struct parser_exec_state s;
-		struct drm_i915_gem_object *obj;
-		struct i915_request *rq;
-
-		rq = requests[id];
-		GEM_BUG_ON(!i915_request_completed(rq));
-		GEM_BUG_ON(!intel_context_is_pinned(rq->context));
-		obj = rq->context->state->obj;
-
-		if (!obj) {
-			ret = -EIO;
-			goto out;
-		}
+		void *vaddr;
+		int ret;
 
-		i915_gem_object_set_cache_coherency(obj,
-						    I915_CACHE_LLC);
+		if (!engine->default_state)
+			continue;
 
-		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+		vaddr = shmem_pin_map(engine->default_state);
 		if (IS_ERR(vaddr)) {
-			gvt_err("failed to pin init ctx obj, ring=%d, err=%lx\n",
-				id, PTR_ERR(vaddr));
-			ret = PTR_ERR(vaddr);
-			goto out;
+			gvt_err("failed to map %s->default state, err:%zd\n",
+				engine->name, PTR_ERR(vaddr));
+			return;
 		}
 
 		s.buf_type = RING_BUFFER_CTX;
@@ -3166,9 +3124,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 		s.vgpu = vgpu;
 		s.engine = engine;
 		s.ring_start = 0;
-		s.ring_size = size;
+		s.ring_size = engine->context_size - start;
 		s.ring_head = 0;
-		s.ring_tail = size;
+		s.ring_tail = s.ring_size;
 		s.rb_va = vaddr + start;
 		s.workload = NULL;
 		s.is_ctx_wa = false;
@@ -3176,29 +3134,18 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
 
 		/* skipping the first RING_CTX_SIZE(0x50) dwords */
 		ret = ip_gma_set(&s, RING_CTX_SIZE);
-		if (ret) {
-			i915_gem_object_unpin_map(obj);
-			goto out;
-		}
-
-		ret = command_scan(&s, 0, size, 0, size);
+		if (ret == 0) {
+			ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
 			if (ret)
 				gvt_err("Scan init ctx error\n");
+		}
 
-		i915_gem_object_unpin_map(obj);
+		shmem_unpin_map(engine->default_state, vaddr);
+		if (ret)
+			return;
 	}
 
-out:
-	if (!ret)
 	gvt->is_reg_whitelist_updated = true;
-
-	for (id = 0; id < I915_NUM_ENGINES ; id++) {
-		if (requests[id])
-			i915_request_put(requests[id]);
-
-		if (is_ctx_pinned[id])
-			intel_context_unpin(s->shadow[id]);
-	}
 }
 
 int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
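
Net effect of the hunks above: intel_gvt_update_reg_whitelist() no longer pins every shadow context, emits a request per engine, and waits for the GT to idle; it scans each engine's recorded default ("golden") context image instead. The loop reduces to the following shape (a paraphrase of the new code, with the hypothetical scan_init_ctx() standing in for the parser_exec_state setup plus command_scan()):

	for_each_engine(engine, gvt->gt, id) {
		void *vaddr;
		int ret;

		if (!engine->default_state)	/* no golden image recorded */
			continue;

		/* kmap of the shmem file holding the default state */
		vaddr = shmem_pin_map(engine->default_state);
		if (IS_ERR(vaddr))
			return;

		/* register state lives LRC_STATE_PN pages into the image */
		ret = scan_init_ctx(vgpu, engine, vaddr + LRC_STATE_PN * PAGE_SIZE);

		shmem_unpin_map(engine->default_state, vaddr);
		if (ret)
			return;
	}
	gvt->is_reg_whitelist_updated = true;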
drivers/gpu/drm/i915/gvt/execlist.c  +3 −5
@@ -522,12 +522,11 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu,
 static void clean_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
-	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -537,11 +536,10 @@ static void clean_execlist(struct intel_vgpu *vgpu,
 static void reset_execlist(struct intel_vgpu *vgpu,
 			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine);
 }
 
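The execlist.c changes above are mechanical fallout of the dev_priv->gt purge: the engine iterators take the struct intel_gt pointer already cached at vgpu->gvt->gt, so the local dev_priv detour through drm_i915_private disappears.
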
drivers/gpu/drm/i915/gvt/scheduler.c  +41 −11
@@ -412,7 +412,9 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	if (!wa_ctx->indirect_ctx.obj)
 		return;
 
+	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 
 	wa_ctx->indirect_ctx.obj = NULL;
@@ -520,6 +522,7 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	struct intel_gvt *gvt = workload->vgpu->gvt;
 	const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
 	struct intel_vgpu_shadow_bb *bb;
+	struct i915_gem_ww_ctx ww;
 	int ret;
 
 	list_for_each_entry(bb, &workload->shadow_bb, list) {
@@ -544,10 +547,19 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 		 * directly
 		 */
 		if (!bb->ppgtt) {
-			bb->vma = i915_gem_object_ggtt_pin(bb->obj,
+			i915_gem_ww_ctx_init(&ww, false);
+retry:
+			i915_gem_object_lock(bb->obj, &ww);
+
+			bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
 							      NULL, 0, 0, 0);
 			if (IS_ERR(bb->vma)) {
 				ret = PTR_ERR(bb->vma);
+				if (ret == -EDEADLK) {
+					ret = i915_gem_ww_ctx_backoff(&ww);
+					if (!ret)
+						goto retry;
+				}
 				goto err;
 			}

@@ -561,13 +573,15 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 						      0);
 			if (ret)
 				goto err;
-		}
 
 			/* No one is going to touch shadow bb from now on. */
 			i915_gem_object_flush_map(bb->obj);
+			i915_gem_object_unlock(bb->obj);
+		}
 	}
 	return 0;
 err:
+	i915_gem_ww_ctx_fini(&ww);
 	release_shadow_batch_buffer(workload);
 	return ret;
 }
@@ -594,14 +608,29 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	unsigned char *per_ctx_va =
 		(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
 		wa_ctx->indirect_ctx.size;
+	struct i915_gem_ww_ctx ww;
+	int ret;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
 
-	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
+
+	vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
 					  0, CACHELINE_BYTES, 0);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		if (ret == -EDEADLK) {
+			ret = i915_gem_ww_ctx_backoff(&ww);
+			if (!ret)
+				goto retry;
+		}
+		return ret;
+	}
+
+	i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
 
 	/* FIXME: we are not tracking our pinned VMA leaving it
 	 * up to the core to fix up the stray pin_count upon
@@ -635,12 +664,14 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 	list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 		if (bb->obj) {
+			i915_gem_object_lock(bb->obj, NULL);
 			if (bb->va && !IS_ERR(bb->va))
 				i915_gem_object_unpin_map(bb->obj);
 
 			if (bb->vma && !IS_ERR(bb->vma))
 				i915_vma_unpin(bb->vma);
 
+			i915_gem_object_unlock(bb->obj);
 			i915_gem_object_put(bb->obj);
 		}
 		list_del(&bb->list);
@@ -1015,13 +1046,12 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
 				intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
 	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
-	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
+	for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
 		list_for_each_entry_safe(pos, n,
 			&s->workload_q_head[engine->id], list) {
 			list_del_init(&pos->list);