Commit e2183fb1 authored by Maarten Lankhorst
Browse files

Revert "drm/scheduler: Job timeout handler returns status (v3)"



This reverts commit c10983e1.

This commit is not meant for drm-misc-next-fixes, and was accidentally
cherry picked over.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
parent 4b8878ee
Loading
Loading
Loading
Loading
+2 −4
Original line number Diff line number Diff line
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"

static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
		DRM_ERROR("ring %s timeout, but soft recovered\n",
			  s_job->sched->name);
		return DRM_GPU_SCHED_STAT_NOMINAL;
		return;
	}

	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,12 +53,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)

	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		amdgpu_device_gpu_recover(ring->adev, job);
		return DRM_GPU_SCHED_STAT_NOMINAL;
	} else {
		drm_sched_suspend_timeout(&ring->sched);
		if (amdgpu_sriov_vf(adev))
			adev->virt.tdr_debug = true;
		return DRM_GPU_SCHED_STAT_NOMINAL;
	}
}

+1 −6
Original line number Diff line number Diff line
@@ -82,8 +82,7 @@ static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
	return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
							  *sched_job)
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
@@ -121,13 +120,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job

	drm_sched_resubmit_jobs(&gpu->sched);

	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+1 −3
Original line number Diff line number Diff line
@@ -415,7 +415,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
	mutex_unlock(&dev->error_task_list_lock);
}

static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_sched_task *task = to_lima_task(job);
@@ -449,8 +449,6 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job

	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void lima_sched_free_job(struct drm_sched_job *job)
+3 −6
Original line number Diff line number Diff line
@@ -432,8 +432,7 @@ static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
	mutex_unlock(&queue->lock);
}

static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
						     *sched_job)
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
@@ -444,7 +443,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
@@ -456,13 +455,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job

	/* Scheduler is already stopped, nothing to do. */
	if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
		return DRM_GPU_SCHED_STAT_NOMINAL;
		return;

	/* Schedule a reset if there's no reset in progress. */
	if (!atomic_xchg(&pfdev->reset.pending, 1))
		schedule_work(&pfdev->reset.work);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
+3 −1
Original line number Diff line number Diff line
@@ -527,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
 *
 * @sched: scheduler instance
 *
@@ -561,6 +561,8 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
		} else {
			s_job->s_fence->parent = fence;
		}


	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
Loading