Commit 1a8b612e authored by Rob Clark
Browse files

drm/msm: Take lru lock once per job_run



Take the LRU lock once per job_run, rather than acquiring and dropping it for each individual object.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/551019/
parent a895037e
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
@@ -509,14 +509,11 @@ void msm_gem_unpin_locked(struct drm_gem_object *obj)
 */
/*
 * Drop one pin reference on a GEM object that is still active on the GPU,
 * and move it to the appropriate LRU list via update_lru_active().
 *
 * NOTE(review): this is a rendered diff view with the +/- markers lost.
 * Per the hunk header (+0 -3), the 'priv' declaration and the
 * mutex_lock/mutex_unlock pair below are the three lines this commit
 * REMOVES — after this commit the caller (msm_job_run) is expected to
 * already hold priv->lru.lock. Confirm against the applied tree before
 * treating this body as the post-commit state.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&priv->lru.lock);
	/* Balance a prior pin; a negative count indicates a pin/unpin mismatch. */
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_lru_active(obj);
	mutex_unlock(&priv->lru.lock);
}

struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+5 −0
Original line number Diff line number Diff line
@@ -16,10 +16,13 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
	struct msm_gem_submit *submit = to_msm_submit(job);
	struct msm_fence_context *fctx = submit->ring->fctx;
	struct msm_gpu *gpu = submit->gpu;
	struct msm_drm_private *priv = gpu->dev->dev_private;
	int i;

	msm_fence_init(submit->hw_fence, fctx);

	mutex_lock(&priv->lru.lock);

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

@@ -28,6 +31,8 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
		submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);
	}

	mutex_unlock(&priv->lru.lock);

	/* TODO move submit path over to using a per-ring lock.. */
	mutex_lock(&gpu->lock);