Commit ae204faa authored by Guchun Chen, committed by Alex Deucher

Revert "drm/amdgpu: Ensure the DMA engine is deactivated during set ups"



This reverts commit b992a190.

This causes a regression in GPU reset related tests.

Cc: Alexander Deucher <Alexander.Deucher@amd.com>
Cc: ricetons@gmail.com
Signed-off-by: Guchun Chen <guchun.chen@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent ea64228d
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c  +45 −64
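For orientation before the diff: commit b992a190 had split the SDMA toggle helpers into one-way variants (sdma_v5_2_ctx_switch_enable_for_instance(), sdma_v5_2_ctx_switch_disable_all(), sdma_v5_2_halt()); this revert restores the single boolean entry points sdma_v5_2_ctx_switch_enable(adev, enable) and sdma_v5_2_enable(adev, enable). Below is a minimal user-space sketch of that restored calling shape. It is not kernel code: the toy_* names, the struct and the bit masks are invented stand-ins for the AUTO_CTXSW_ENABLE field of SDMA0_CNTL and the HALT field of SDMA0_F32_CNTL that the real functions program.

/*
 * Toy model (plain user-space C, not kernel code) of the call shape this
 * revert restores. toy_device, the masks and the function names are all
 * invented for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_AUTO_CTXSW_ENABLE (1u << 0)	/* stand-in for SDMA0_CNTL.AUTO_CTXSW_ENABLE */
#define TOY_HALT              (1u << 1)	/* stand-in for SDMA0_F32_CNTL.HALT */

struct toy_sdma_instance {
	unsigned int cntl;	/* models mmSDMA0_CNTL */
	unsigned int f32_cntl;	/* models mmSDMA0_F32_CNTL */
};

struct toy_device {
	int num_instances;
	struct toy_sdma_instance inst[2];
};

/* Mirrors sdma_v5_2_ctx_switch_enable(): one bool, one loop over all instances. */
static void toy_ctx_switch_enable(struct toy_device *adev, bool enable)
{
	for (int i = 0; i < adev->num_instances; i++) {
		if (enable)
			adev->inst[i].cntl |= TOY_AUTO_CTXSW_ENABLE;
		else
			adev->inst[i].cntl &= ~TOY_AUTO_CTXSW_ENABLE;
	}
}

/* Mirrors sdma_v5_2_enable(): the HALT field is written as enable ? 0 : 1. */
static void toy_enable(struct toy_device *adev, bool enable)
{
	for (int i = 0; i < adev->num_instances; i++) {
		if (enable)
			adev->inst[i].f32_cntl &= ~TOY_HALT;
		else
			adev->inst[i].f32_cntl |= TOY_HALT;
	}
}

int main(void)
{
	struct toy_device adev = { .num_instances = 2 };

	/* hw_fini / SR-IOV start path: stop preemption, then halt the MEs */
	toy_ctx_switch_enable(&adev, false);
	toy_enable(&adev, false);

	/* bare-metal start path: unhalt the MEs, then re-enable preemption */
	toy_enable(&adev, true);
	toy_ctx_switch_enable(&adev, true);

	printf("inst0: cntl=%#x f32_cntl=%#x\n",
	       adev.inst[0].cntl, adev.inst[0].f32_cntl);
	return 0;
}

The ordering mirrors the hunks below: sdma_v5_2_hw_fini() and the SR-IOV branch of sdma_v5_2_start() call the pair with false (preemption off, then MEs halted), while the bare-metal start path unhalts the MEs first and then re-enables preemption.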
@@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 	}
 }
 
+
 /**
  * sdma_v5_2_gfx_stop - stop the gfx async dma engines
  *
@@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines
- * context switch for an instance
+ * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
- * @instance_idx: the index of the SDMA instance
+ * @enable: enable/disable the DMA MEs context switch.
  *
- * Unhalt the async dma engines context switch.
+ * Halt or unhalt the async dma engines context switch.
  */
-static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx)
+static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 {
 	u32 f32_cntl, phase_quantum = 0;
-
-	if (WARN_ON(instance_idx >= adev->sdma.num_instances)) {
-		return;
-	}
+	int i;
 
 	if (amdgpu_sdma_phase_quantum) {
 		unsigned value = amdgpu_sdma_phase_quantum;
@@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev,
 		phase_quantum =
 			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
 			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+	}
 
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM),
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (enable && amdgpu_sdma_phase_quantum) {
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
 			       phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM),
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
 			       phase_quantum);
-		WREG32_SOC15_IP(GC,
-			sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM),
+			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
 			       phase_quantum);
 		}
 
 		if (!amdgpu_sriov_vf(adev)) {
-		f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL));
-		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 1);
-		WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl);
-	}
-}
-
-/**
- * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch
- *
- * @adev: amdgpu_device pointer
- *
- * Halt the async dma engines context switch.
- */
-static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev)
-{
-	u32 f32_cntl;
-	int i;
-
-	if (amdgpu_sriov_vf(adev))
-		return;
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
-				AUTO_CTXSW_ENABLE, 0);
+					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+		}
 	}
+
 }
 
 /**
- * sdma_v5_2_halt - stop the async dma engines
+ * sdma_v5_2_enable - stop the async dma engines
  *
  * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs.
  *
- * Halt the async dma engines.
+ * Halt or unhalt the async dma engines.
  */
-static void sdma_v5_2_halt(struct amdgpu_device *adev)
+static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
 {
-	int i;
 	u32 f32_cntl;
+	int i;
 
+	if (!enable) {
 		sdma_v5_2_gfx_stop(adev);
 		sdma_v5_2_rlc_stop(adev);
+	}
 
 	if (!amdgpu_sriov_vf(adev)) {
 		for (i = 0; i < adev->sdma.num_instances; i++) {
 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
-			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
+			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
 		}
 	}
@@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  *
  * Set up the gfx DMA ring buffers and enable them.
- * It assumes that the dma engine is stopped for each instance.
- * The function enables the engine and preemptions sequentially for each instance.
- *
  * Returns 0 for success, error for failure.
  */
 static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
@@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
 
 		ring->sched.ready = true;
 
-		sdma_v5_2_ctx_switch_enable_for_instance(adev, i);
+		if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
+			sdma_v5_2_ctx_switch_enable(adev, true);
+			sdma_v5_2_enable(adev, true);
+		}
 
 		r = amdgpu_ring_test_ring(ring);
 		if (r) {
@@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
 	int i, j;
 
 	/* halt the MEs */
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_enable(adev, false);
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (!adev->sdma.instance[i].fw)
@@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 	int r = 0;
 
 	if (amdgpu_sriov_vf(adev)) {
-		sdma_v5_2_ctx_switch_disable_all(adev);
-		sdma_v5_2_halt(adev);
+		sdma_v5_2_ctx_switch_enable(adev, false);
+		sdma_v5_2_enable(adev, false);
 
 		/* set RB registers */
 		r = sdma_v5_2_gfx_resume(adev);
@@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
 		amdgpu_gfx_off_ctrl(adev, false);
 
 	sdma_v5_2_soft_reset(adev);
+	/* unhalt the MEs */
+	sdma_v5_2_enable(adev, true);
+	/* enable sdma ring preemption */
+	sdma_v5_2_ctx_switch_enable(adev, true);
 
-	/* Soft reset supposes to disable the dma engine and preemption.
-	 * Now start the gfx rings and rlc compute queues.
-	 */
+	/* start the gfx rings and rlc compute queues */
 	r = sdma_v5_2_gfx_resume(adev);
 	if (adev->in_s0ix)
 		amdgpu_gfx_off_ctrl(adev, true);
@@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	sdma_v5_2_ctx_switch_disable_all(adev);
-	sdma_v5_2_halt(adev);
+	sdma_v5_2_ctx_switch_enable(adev, false);
+	sdma_v5_2_enable(adev, false);
 
 	return 0;
 }
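A closing note on the register writes themselves: every update in this diff is the same read-modify-write idiom (RREG32() to fetch the register, REG_SET_FIELD() to replace one bit-field, WREG32() to write it back), which is why the functional changes reduce to single lines such as HALT, 1 versus HALT, enable ? 0 : 1. A self-contained sketch of that idiom follows, with invented mask and shift values rather than the real SDMA0_F32_CNTL definitions:

/* Stand-alone model of the RREG32 / REG_SET_FIELD / WREG32 idiom used
 * above. The mask and shift here are invented; in the kernel they come
 * from generated register headers (e.g. SDMA0_F32_CNTL__HALT_MASK). */
#include <stdint.h>
#include <stdio.h>

#define TOY_HALT_MASK  0x00000001u
#define TOY_HALT_SHIFT 0u

static uint32_t toy_set_field(uint32_t reg, uint32_t mask,
			      uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);	/* keep other bits */
}

int main(void)
{
	uint32_t f32_cntl = 0xdeadbe00u;	/* pretend this came from RREG32() */

	f32_cntl = toy_set_field(f32_cntl, TOY_HALT_MASK, TOY_HALT_SHIFT, 1);
	printf("halted:   %#010x\n", (unsigned int)f32_cntl);	/* would go to WREG32() */

	f32_cntl = toy_set_field(f32_cntl, TOY_HALT_MASK, TOY_HALT_SHIFT, 0);
	printf("unhalted: %#010x\n", (unsigned int)f32_cntl);
	return 0;
}

Because only the masked field changes, the surrounding bits of the control register survive each toggle, which is what makes the restored enable/disable pairs safe to call repeatedly across resets.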