Commit b827c84f authored by Daniel Vetter's avatar Daniel Vetter
Browse files

drm/etnaviv: Use scheduler dependency handling



We need to pull the drm_sched_job_init much earlier, but that's very
minor surgery.

v2: Actually fix up cleanup paths by calling drm_sched_job_init, which
I wanted to do in the previous round (and did, for all other drivers).
Spotted by Lucas.

v3: Rebase over renamed functions to add dependencies.

v4: Rebase over patches from Christian.

v5: More rebasing over work from Christian.

Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Lucas Stach <l.stach@pengutronix.de>
Cc: Russell King <linux+etnaviv@armlinux.org.uk>
Cc: Christian Gmeiner <christian.gmeiner@gmail.com>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: etnaviv@lists.freedesktop.org
Cc: linux-media@vger.kernel.org
Cc: linaro-mm-sig@lists.linaro.org
Link: https://patchwork.freedesktop.org/patch/msgid/20220331204651.2699107-2-daniel.vetter@ffwll.ch
parent ae059c0b
Loading
Loading
Loading
Loading
+1 −3
Original line number Diff line number Diff line
@@ -80,8 +80,6 @@ struct etnaviv_gem_submit_bo {
	u64 va;
	struct etnaviv_gem_object *obj;
	struct etnaviv_vram_mapping *mapping;
	unsigned int nr_fences;
	struct dma_fence **fences;
};

/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -94,7 +92,7 @@ struct etnaviv_gem_submit {
	struct etnaviv_file_private *ctx;
	struct etnaviv_gpu *gpu;
	struct etnaviv_iommu_context *mmu_context, *prev_mmu_context;
	struct dma_fence *out_fence, *in_fence;
	struct dma_fence *out_fence;
	int out_fence_id;
	struct list_head node; /* GPU active submit list */
	struct etnaviv_cmdbuf cmdbuf;
+31 −20
Original line number Diff line number Diff line
@@ -188,9 +188,9 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
			continue;

		ret = dma_resv_get_fences(robj,
					  bo->flags & ETNA_SUBMIT_BO_WRITE,
					  &bo->nr_fences, &bo->fences);
		ret = drm_sched_job_add_implicit_dependencies(&submit->sched_job,
							      &bo->obj->base,
							      bo->flags & ETNA_SUBMIT_BO_WRITE);
		if (ret)
			return ret;
	}
@@ -398,8 +398,6 @@ static void submit_cleanup(struct kref *kref)

	wake_up_all(&submit->gpu->fence_event);

	if (submit->in_fence)
		dma_fence_put(submit->in_fence);
	if (submit->out_fence) {
		/* first remove from IDR, so fence can not be found anymore */
		mutex_lock(&submit->gpu->fence_lock);
@@ -530,58 +528,69 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
	ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &submit->cmdbuf,
				  ALIGN(args->stream_size, 8) + 8);
	if (ret)
		goto err_submit_objects;
		goto err_submit_put;

	submit->ctx = file->driver_priv;
	submit->mmu_context = etnaviv_iommu_context_get(submit->ctx->mmu);
	submit->exec_state = args->exec_state;
	submit->flags = args->flags;

	ret = drm_sched_job_init(&submit->sched_job,
				 &ctx->sched_entity[args->pipe],
				 submit->ctx);
	if (ret)
		goto err_submit_put;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	if ((priv->mmu_global->version != ETNAVIV_IOMMU_V2) &&
	    !etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
		goto err_submit_job;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		submit->in_fence = sync_file_get_fence(args->fence_fd);
		if (!submit->in_fence) {
		struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
			goto err_submit_job;
		}

		ret = drm_sched_job_add_dependency(&submit->sched_job,
						   in_fence);
		if (ret)
			goto err_submit_job;
	}

	ret = submit_pin_objects(submit);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	ret = submit_perfmon_validate(submit, args->exec_state, pmrs);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);

	ret = submit_lock_objects(submit, &ticket);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
	ret = etnaviv_sched_push_job(submit);
	if (ret)
		goto err_submit_objects;
		goto err_submit_job;

	submit_attach_object_fences(submit);

@@ -595,7 +604,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		sync_file = sync_file_create(submit->out_fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto err_submit_objects;
			goto err_submit_job;
		}
		fd_install(out_fence_fd, sync_file->file);
	}
@@ -603,7 +612,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
	args->fence_fd = out_fence_fd;
	args->fence = submit->out_fence_id;

err_submit_objects:
err_submit_job:
	drm_sched_job_cleanup(&submit->sched_job);
err_submit_put:
	etnaviv_submit_put(submit);

err_submit_ww_acquire:
+2 −51
Original line number Diff line number Diff line
@@ -17,48 +17,6 @@ module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int , 0444);

/*
 * Legacy per-driver dependency callback (removed by this commit in favor of
 * the common drm_sched dependency handling).
 *
 * Returns the next fence the scheduler must wait on before running this job,
 * or NULL once all dependencies are resolved.  Each returned fence's
 * reference is handed to the caller; fences that are already signaled are
 * dropped here with dma_fence_put().
 */
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	/* Explicit sync-file in-fence takes priority; consume it exactly once. */
	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	/* Implicit fences collected from each BO's reservation object. */
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		for (j = 0; j < bo->nr_fences; j++) {
			if (!bo->fences[j])
				continue;

			/* Transfer ownership out of the array before returning. */
			fence = bo->fences[j];
			bo->fences[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		/* All of this BO's fences consumed; release the array. */
		kfree(bo->fences);
		bo->nr_fences = 0;
		bo->fences = NULL;
	}

	return NULL;
}

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
@@ -132,29 +90,22 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
}

/*
 * Scheduler backend callbacks wired into the common DRM GPU scheduler.
 * NOTE(review): the .dependency hook is the line this commit removes —
 * dependency handling moves to the drm_sched core.
 */
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 * allocated in drm_sched_job_arm.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	drm_sched_job_arm(&submit->sched_job);

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
+1 −2
Original line number Diff line number Diff line
@@ -18,7 +18,6 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)

int etnaviv_sched_init(struct etnaviv_gpu *gpu);
void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit);
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit);

#endif /* __ETNAVIV_SCHED_H__ */