Commit 1d8a5ca4 authored by Rob Clark

drm/msm: Conversion to drm scheduler

For existing adrenos, there are one or more ringbuffers, depending on
whether preemption is supported.  When preemption is supported, each
ringbuffer has its own priority.  A submitqueue (which maps to a
gl context or vk queue in userspace) is mapped to a specific
ringbuffer at creation time, based on the submitqueue's priority.

Each ringbuffer has its own drm_gpu_scheduler.  Each submitqueue
maps to a drm_sched_entity.  And each submit maps to a drm_sched_job.
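
In drm_sched terms the flow is roughly the following (a sketch of the
scheduler API as used here, error handling omitted; sched_prio and
ring->sched are illustrative stand-ins for the priority derived from
the submitqueue and the per-ring scheduler instance):

	/* one scheduler per ringbuffer, one entity per submitqueue */
	struct drm_gpu_scheduler *sched = &ring->sched;
	drm_sched_entity_init(&queue->entity, sched_prio, &sched, 1, NULL);

	/* each submit embeds a drm_sched_job ... */
	drm_sched_job_init(&submit->base, &queue->entity, queue);
	/* ... which is handed to the scheduler once its deps are recorded */
	drm_sched_entity_push_job(&submit->base, &queue->entity);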

Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/4
Signed-off-by: Rob Clark <robdclark@chromium.org>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20210728010632.2633470-10-robdclark@gmail.com
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 79341eb7
drivers/gpu/drm/msm/Kconfig (+1 −0)
@@ -14,6 +14,7 @@ config DRM_MSM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
+	select DRM_SCHED
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM if ARCH_QCOM
drivers/gpu/drm/msm/msm_gem.c (+0 −35)
@@ -804,41 +804,6 @@ void msm_gem_vunmap(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
-/* must be called before _move_to_active().. */
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive)
-{
-	struct dma_resv_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	fobj = dma_resv_shared_list(obj->resv);
-	if (!fobj || (fobj->shared_count == 0)) {
-		fence = dma_resv_excl_fence(obj->resv);
-		/* don't need to wait on our own fences, since ring is fifo */
-		if (fence && (fence->context != fctx->context)) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						dma_resv_held(obj->resv));
-		if (fence->context != fctx->context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
drivers/gpu/drm/msm/msm_gem.h (+23 −3)
@@ -9,6 +9,7 @@

 #include <linux/kref.h>
 #include <linux/dma-resv.h>
+#include "drm/gpu_scheduler.h"
 #include "msm_drv.h"
 
 /* Make all GEM related WARN_ON()s ratelimited.. when things go wrong they
@@ -143,8 +144,6 @@ void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
-int msm_gem_sync_object(struct drm_gem_object *obj,
-		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
 void msm_gem_active_put(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
@@ -311,6 +310,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj);
  * make it easier to unwind when things go wrong, etc).
  */
 struct msm_gem_submit {
+	struct drm_sched_job base;
 	struct kref ref;
 	struct drm_device *dev;
 	struct msm_gpu *gpu;
@@ -319,7 +319,22 @@ struct msm_gem_submit {
 	struct list_head bo_list;
 	struct ww_acquire_ctx ticket;
 	uint32_t seqno;		/* Sequence number of the submit on the ring */
-	struct dma_fence *fence;
+
+	/* Array of struct dma_fence * to block on before submitting this job.
+	 */
+	struct xarray deps;
+	unsigned long last_dep;
+
+	/* Hw fence, which is created when the scheduler executes the job, and
+	 * is signaled when the hw finishes (via seqno write from cmdstream)
+	 */
+	struct dma_fence *hw_fence;
+
+	/* Userspace visible fence, which is signaled by the scheduler after
+	 * the hw_fence is signaled.
+	 */
+	struct dma_fence *user_fence;
+
 	int fence_id;       /* key into queue->fence_idr */
 	struct msm_gpu_submitqueue *queue;
 	struct pid *pid;    /* submitting process */
@@ -350,6 +365,11 @@ struct msm_gem_submit {
 	} bos[];
 };
 
+static inline struct msm_gem_submit *to_msm_submit(struct drm_sched_job *job)
+{
+	return container_of(job, struct msm_gem_submit, base);
+}
+
 void __msm_gem_submit_destroy(struct kref *kref);
 
 static inline void msm_gem_submit_get(struct msm_gem_submit *submit)
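
The deps xarray and last_dep cursor above feed the scheduler's dependency
callback, which hands the scheduler one blocking fence at a time until the
job is runnable.  Roughly (the real callback lives in msm_ringbuffer.c,
which is not shown in this excerpt):

	static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
						    struct drm_sched_entity *s_entity)
	{
		struct msm_gem_submit *submit = to_msm_submit(job);

		/* pop the next not-yet-waited-on dependency, if any: */
		if (!xa_empty(&submit->deps))
			return xa_erase(&submit->deps, submit->last_dep++);

		return NULL;
	}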
drivers/gpu/drm/msm/msm_gem_submit.c (+79 −82)
@@ -33,6 +33,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 {
 	struct msm_gem_submit *submit;
 	uint64_t sz;
+	int ret;
 
 	sz = struct_size(submit, bos, nr_bos) +
 			((u64)nr_cmds * sizeof(submit->cmd[0]));
@@ -44,6 +45,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 	if (!submit)
 		return ERR_PTR(-ENOMEM);
 
+	ret = drm_sched_job_init(&submit->base, &queue->entity, queue);
+	if (ret) {
+		kfree(submit);
+		return ERR_PTR(ret);
+	}
+
+	xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
+
 	kref_init(&submit->ref);
 	submit->dev = dev;
 	submit->aspace = queue->ctx->aspace;
@@ -63,6 +72,8 @@ void __msm_gem_submit_destroy(struct kref *kref)
 {
 	struct msm_gem_submit *submit =
 			container_of(kref, struct msm_gem_submit, ref);
+	unsigned long index;
+	struct dma_fence *fence;
 	unsigned i;
 
 	if (submit->fence_id) {
@@ -71,7 +82,14 @@ void __msm_gem_submit_destroy(struct kref *kref)
 		mutex_unlock(&submit->queue->lock);
 	}
 
-	dma_fence_put(submit->fence);
+	xa_for_each (&submit->deps, index, fence) {
+		dma_fence_put(fence);
+	}
+
+	xa_destroy(&submit->deps);
+
+	dma_fence_put(submit->user_fence);
+	dma_fence_put(submit->hw_fence);
 
 	put_pid(submit->pid);
 	msm_submitqueue_put(submit->queue);
@@ -307,7 +325,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 	int i, ret = 0;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct msm_gem_object *msm_obj = submit->bos[i].obj;
+		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 
 		if (!write) {
@@ -316,8 +334,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 			 * strange place to call it.  OTOH this is a
 			 * convenient can-fail point to hook it in.
 			 */
-			ret = dma_resv_reserve_shared(msm_obj->base.resv,
-								1);
+			ret = dma_resv_reserve_shared(obj->resv, 1);
 			if (ret)
 				return ret;
 		}
@@ -325,7 +342,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 		if (no_implicit)
 			continue;
 
-		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
+		ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
 			write);
 		if (ret)
 			break;
@@ -376,9 +393,9 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
 		struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
 		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
-			dma_resv_add_excl_fence(obj->resv, submit->fence);
+			dma_resv_add_excl_fence(obj->resv, submit->user_fence);
 		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-			dma_resv_add_shared_fence(obj->resv, submit->fence);
+			dma_resv_add_shared_fence(obj->resv, submit->user_fence);
 	}
 }

@@ -517,7 +534,7 @@ struct msm_submit_post_dep {
 	struct dma_fence_chain *chain;
 };
 
-static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
+static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
                                            struct drm_file *file,
                                            uint64_t in_syncobjs_addr,
                                            uint32_t nr_in_syncobjs,
@@ -546,7 +563,7 @@ static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
 		}
 
 		if (syncobj_desc.point &&
-		    !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+		    !drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
 			ret = -EOPNOTSUPP;
 			break;
 		}
@@ -561,10 +578,7 @@ static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
 		if (ret)
 			break;
 
-		if (!dma_fence_match_context(fence, ring->fctx->context))
-			ret = dma_fence_wait(fence, true);
-
-		dma_fence_put(fence);
+		ret = drm_gem_fence_array_add(&submit->deps, fence);
 		if (ret)
 			break;
 
@@ -741,34 +755,55 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
 		args->nr_bos, args->nr_cmds);
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		goto out_post_unlock;
+
+	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			ret = out_fence_fd;
+			goto out_unlock;
+		}
+	}
+
+	submit = submit_create(dev, gpu, queue, args->nr_bos,
+		args->nr_cmds);
+	if (IS_ERR(submit)) {
+		ret = PTR_ERR(submit);
+		goto out_unlock;
+	}
+
+	submit->pid = pid;
+	submit->ident = submitid;
+
+	if (args->flags & MSM_SUBMIT_SUDO)
+		submit->in_rb = true;
+
 	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
 		struct dma_fence *in_fence;
 
 		in_fence = sync_file_get_fence(args->fence_fd);
 
-		if (!in_fence)
-			return -EINVAL;
-
-		/*
-		 * Wait if the fence is from a foreign context, or if the fence
-		 * array contains any fence from a foreign context.
-		 */
-		ret = 0;
-		if (!dma_fence_match_context(in_fence, ring->fctx->context))
-			ret = dma_fence_wait(in_fence, true);
+		if (!in_fence) {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
 
-		dma_fence_put(in_fence);
+		ret = drm_gem_fence_array_add(&submit->deps, in_fence);
 		if (ret)
-			return ret;
+			goto out_unlock;
 	}
 
 	if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
-		syncobjs_to_reset = msm_wait_deps(dev, file,
+		syncobjs_to_reset = msm_parse_deps(submit, file,
 		                                   args->in_syncobjs,
 		                                   args->nr_in_syncobjs,
 		                                   args->syncobj_stride, ring);
-		if (IS_ERR(syncobjs_to_reset))
-			return PTR_ERR(syncobjs_to_reset);
+		if (IS_ERR(syncobjs_to_reset)) {
+			ret = PTR_ERR(syncobjs_to_reset);
+			goto out_unlock;
+		}
 	}

	if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
@@ -778,51 +813,17 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		                                args->syncobj_stride);
 		if (IS_ERR(post_deps)) {
 			ret = PTR_ERR(post_deps);
 			goto out_post_unlock;
 		}
 	}
 
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		goto out_post_unlock;
-
-	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
-		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
-		if (out_fence_fd < 0) {
-			ret = out_fence_fd;
-			goto out_unlock;
-		}
-	}
-
-	submit = submit_create(dev, gpu, queue, args->nr_bos,
-		args->nr_cmds);
-	if (IS_ERR(submit)) {
-		ret = PTR_ERR(submit);
-		goto out_unlock;
-	}
-
-	submit->pid = pid;
-	submit->ident = submitid;
-
-	if (args->flags & MSM_SUBMIT_SUDO)
-		submit->in_rb = true;
-
 	ret = submit_lookup_objects(submit, args, file);
 	if (ret)
-		goto out_pre_pm;
+		goto out;
 
 	ret = submit_lookup_cmds(submit, args, file);
 	if (ret)
-		goto out_pre_pm;
-
-	/*
-	 * Thanks to dev_pm_opp opp_table_lock interactions with mm->mmap_sem
-	 * in the resume path, we need to to rpm get before we lock objs.
-	 * Which unfortunately might involve powering up the GPU sooner than
-	 * is necessary.  But at least in the explicit fencing case, we will
-	 * have already done all the fence waiting.
-	 */
-	pm_runtime_get_sync(&gpu->pdev->dev);
+		goto out;
 
 	/* copy_*_user while holding a ww ticket upsets lockdep */
 	ww_acquire_init(&submit->ticket, &reservation_ww_class);
@@ -869,12 +870,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,

 	submit->nr_cmds = i;
 
-	submit->fence = msm_fence_alloc(ring->fctx);
-	if (IS_ERR(submit->fence)) {
-		ret = PTR_ERR(submit->fence);
-		submit->fence = NULL;
-		goto out;
-	}
+	submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
 
 	/*
 	 * Allocate an id which can be used by WAIT_FENCE ioctl to map back
@@ -882,7 +878,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	 */
 	mutex_lock(&queue->lock);
 	submit->fence_id = idr_alloc_cyclic(&queue->fence_idr,
-			submit->fence, 0, INT_MAX, GFP_KERNEL);
+			submit->user_fence, 0, INT_MAX, GFP_KERNEL);
 	mutex_unlock(&queue->lock);
 	if (submit->fence_id < 0) {
 		ret = submit->fence_id = 0;
@@ -891,7 +887,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	}
 
 	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
-		struct sync_file *sync_file = sync_file_create(submit->fence);
+		struct sync_file *sync_file = sync_file_create(submit->user_fence);
 		if (!sync_file) {
 			ret = -ENOMEM;
 			goto out;
@@ -902,18 +898,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,

 	submit_attach_object_fences(submit);
 
-	msm_gpu_submit(gpu, submit);
+	/* The scheduler owns a ref now: */
+	msm_gem_submit_get(submit);
+
+	drm_sched_entity_push_job(&submit->base, &queue->entity);
 
 	args->fence = submit->fence_id;
 
 	msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
 	msm_process_post_deps(post_deps, args->nr_out_syncobjs,
-	                      submit->fence);
+	                      submit->user_fence);
 
 out:
-	pm_runtime_put(&gpu->pdev->dev);
-out_pre_pm:
 	submit_cleanup(submit, !!ret);
 	if (has_ww_ticket)
 		ww_acquire_fini(&submit->ticket);
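
The msm_gpu_submit() call and the pm_runtime get/put dropped above move
into the scheduler's run_job callback, which also allocates the hw_fence
before writing the cmdstream to the ring.  Approximately (again from
msm_ringbuffer.c, which is not shown in this excerpt):

	static struct dma_fence *msm_job_run(struct drm_sched_job *job)
	{
		struct msm_gem_submit *submit = to_msm_submit(job);
		struct msm_gpu *gpu = submit->gpu;

		/* created here, signaled from the seqno write in the cmdstream: */
		submit->hw_fence = msm_fence_alloc(submit->ring->fctx);

		pm_runtime_get_sync(&gpu->pdev->dev);

		mutex_lock(&gpu->dev->struct_mutex);
		msm_gpu_submit(gpu, submit);
		mutex_unlock(&gpu->dev->struct_mutex);

		pm_runtime_put(&gpu->pdev->dev);

		return dma_fence_get(submit->hw_fence);
	}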
drivers/gpu/drm/msm/msm_gpu.c (+3 −10)
@@ -176,8 +176,8 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 			break;
 
 		msm_update_fence(submit->ring->fctx,
-			submit->fence->seqno);
-		dma_fence_signal(submit->fence);
+			submit->hw_fence->seqno);
+		dma_fence_signal(submit->hw_fence);
 	}
 	spin_unlock_irqrestore(&ring->submit_lock, flags);
 }
@@ -380,10 +380,6 @@ static void recover_worker(struct kthread_work *work)
 			put_task_struct(task);
 		}
 
-		/* msm_rd_dump_submit() needs bo locked to dump: */
-		for (i = 0; i < submit->nr_bos; i++)
-			msm_gem_lock(&submit->bos[i].obj->base);
-
 		if (comm && cmd) {
 			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
 				gpu->name, comm, cmd);
@@ -393,9 +389,6 @@ static void recover_worker(struct kthread_work *work)
 		} else {
 			msm_rd_dump_submit(priv->hangrd, submit, NULL);
 		}
-
-		for (i = 0; i < submit->nr_bos; i++)
-			msm_gem_unlock(&submit->bos[i].obj->base);
 	}
 
 	/* Record the crash state */
@@ -704,7 +697,7 @@ static void retire_submits(struct msm_gpu *gpu)
 			 * been signalled, then later submits are not signalled
 			 * either, so we are also done.
 			 */
-			if (submit && dma_fence_is_signaled(submit->fence)) {
+			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
 				retire_submit(gpu, ring, submit);
 			} else {
 				break;