Commit 147862d0 authored by Shiwu Zhang, committed by Alex Deucher
Browse files

drm/amdgpu: enable the ring and IB test for slave kcq



With the mec FW update to utilize the mqd base set by
driver for kcq mapping, slave kcq ring test and IB test
can be re-enabled.

Signed-off-by: Shiwu Zhang <shiwu.zhang@amd.com>
Reviewed-by: Le Ma <Le.Ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 89cf4549
Loading
Loading
Loading
Loading
+27 −32
Original line number Diff line number Diff line
@@ -449,8 +449,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j])
			adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j + xcc_id * adev->gfx.num_compute_rings])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}
@@ -502,7 +502,6 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
@@ -515,9 +514,8 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
					   &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);
	}
	}

	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

@@ -598,8 +596,6 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
							kiq_ring->queue);
	spin_lock(&kiq->ring_lock);
	/* No need to map kcq on the slave */
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
@@ -616,8 +612,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
			kiq->pmf->kiq_map_queues(kiq_ring,
						 &adev->gfx.compute_ring[i]);
		}
						 &adev->gfx.compute_ring[j]);
	}

	r = amdgpu_ring_test_helper(kiq_ring);
+0 −5
Original line number Diff line number Diff line
@@ -433,11 +433,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
		else
			tmo = tmo_gfx;

		/* skip ib test on the slave kcq */
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
		    !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id))
			continue;

		r = amdgpu_ring_test_ib(ring, tmo);
		if (!r) {
			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+5 −7
Original line number Diff line number Diff line
@@ -1956,13 +1956,11 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
		if (r)
			return r;

		/* skip ring test on slave kcq */
		if (amdgpu_gfx_is_master_xcc(adev, i)) {
		for (j = 0; j < adev->gfx.num_compute_rings; j++) {
				ring = &adev->gfx.compute_ring[j +
					i * adev->gfx.num_compute_rings];
				amdgpu_ring_test_helper(ring);
			}
			ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				return r;
		}

		gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);