Commit c0459bdd authored by Alan Liu's avatar Alan Liu Committed by Alex Deucher
Browse files

drm/amd/display: Implement secure display on DCN21



[Why]
Porting secure display feature from DCN10 to DCN21. Support single
display for now and will extend to multiple displays.

[How]
- use a workqueue to offload work for the dmub or dmcu firmware
- after receiving ROI update from userspace, set skip_frame_cnt to 1
- refactor amdgpu_dm_crtc_handle_crc_window_irq()
- disable PSR before activating secure_display on a crtc
- check if secure_display is activated before enabling psr
- only works for a single display for now.

Tested-by: default avatarMark Broadworth <mark.broadworth@amd.com>
Reviewed-by: default avatarWayne Lin <Wayne.Lin@amd.com>
Acked-by: default avatarRodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Signed-off-by: default avatarAlan Liu <HaoPing.Liu@amd.com>
Signed-off-by: default avatarAlex Deucher <alexander.deucher@amd.com>
parent d1bc26cb
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -7840,6 +7840,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
			 */
			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
			    acrtc_attach->dm_irq_params.allow_psr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_enable(acrtc_state->stream);
		} else {
@@ -8301,8 +8304,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
				if (amdgpu_dm_crc_window_is_activated(crtc)) {
					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
					acrtc->dm_irq_params.crc_window.update_win = true;
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
					acrtc->dm_irq_params.window_param.update_win = true;
					acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
					crc_rd_wrk->crtc = crtc;
					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+71 −80
Original line number Diff line number Diff line
@@ -89,13 +89,13 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_window.x_start = 0;
	acrtc->dm_irq_params.crc_window.y_start = 0;
	acrtc->dm_irq_params.crc_window.x_end = 0;
	acrtc->dm_irq_params.crc_window.y_end = 0;
	acrtc->dm_irq_params.crc_window.activated = false;
	acrtc->dm_irq_params.crc_window.update_win = false;
	acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
	acrtc->dm_irq_params.window_param.roi.x_start = 0;
	acrtc->dm_irq_params.window_param.roi.y_start = 0;
	acrtc->dm_irq_params.window_param.roi.x_end = 0;
	acrtc->dm_irq_params.window_param.roi.y_end = 0;
	acrtc->dm_irq_params.window_param.activated = false;
	acrtc->dm_irq_params.window_param.update_win = false;
	acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
	spin_unlock_irq(&drm_dev->event_lock);
}

@@ -135,6 +135,22 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
	}
}

/* Work handler: forward a CRC window (ROI) update to the firmware
 * outside of IRQ context.
 *
 * The work item is allocated (GFP_ATOMIC) and scheduled from the
 * vblank/CRC-window IRQ path, where the sleeping dc_lock mutex cannot
 * be taken; this handler runs in process context, so taking dc_lock
 * here is safe. The crc_fw_work item is one-shot and is freed at the
 * end of the handler.
 */
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
	struct crc_fw_work *crc_fw_wrk;
	struct amdgpu_display_manager *dm;

	/* Recover the containing request from the embedded work_struct. */
	crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
	dm = crc_fw_wrk->dm;

	/* dc_lock serializes access to the dc core state. */
	mutex_lock(&dm->dc_lock);
	dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->roi, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
	mutex_unlock(&dm->dc_lock);

	/* One-shot request: release it now that it has been forwarded. */
	kfree(crc_fw_wrk);
}

bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
	struct drm_device *drm_dev = crtc->dev;
@@ -142,7 +158,7 @@ bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
	bool ret = false;

	spin_lock_irq(&drm_dev->event_lock);
	ret = acrtc->dm_irq_params.crc_window.activated;
	ret = acrtc->dm_irq_params.window_param.activated;
	spin_unlock_irq(&drm_dev->event_lock);

	return ret;
@@ -187,9 +203,11 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
			if (adev->dm.crc_rd_wrk) {
				flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
				spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);

				if (adev->dm.crc_rd_wrk->crtc == crtc) {
					dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc,
									dm_crtc_state->stream);
					/* stop ROI update on this crtc */
					dc_stream_forward_crc_window(stream_state->ctx->dc,
							NULL, stream_state, true);
					adev->dm.crc_rd_wrk->crtc = NULL;
				}
				spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
@@ -439,14 +457,9 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
	enum amdgpu_dm_pipe_crc_source cur_crc_src;
	struct amdgpu_crtc *acrtc = NULL;
	struct amdgpu_device *adev = NULL;
	struct crc_rd_work *crc_rd_wrk = NULL;
	struct crc_params *crc_window = NULL, tmp_window;
	struct crc_rd_work *crc_rd_wrk;
	struct crc_fw_work *crc_fw_wrk;
	unsigned long flags1, flags2;
	struct crtc_position position;
	uint32_t v_blank;
	uint32_t v_back_porch;
	uint32_t crc_window_latch_up_line;
	struct dc_crtc_timing *timing_out;

	if (crtc == NULL)
		return;
@@ -458,75 +471,53 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
	spin_lock_irqsave(&drm_dev->event_lock, flags1);
	stream_state = acrtc->dm_irq_params.stream;
	cur_crc_src = acrtc->dm_irq_params.crc_src;
	timing_out = &stream_state->timing;

	/* Early return if CRC capture is not enabled. */
	if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
		goto cleanup;

	if (dm_is_crc_source_crtc(cur_crc_src)) {
		if (acrtc->dm_irq_params.crc_window.activated) {
			if (acrtc->dm_irq_params.crc_window.update_win) {
				if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) {
					acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
	if (!dm_is_crc_source_crtc(cur_crc_src))
		goto cleanup;

	if (!acrtc->dm_irq_params.window_param.activated)
		goto cleanup;

	if (acrtc->dm_irq_params.window_param.update_win) {
		if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
			acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
			goto cleanup;
		}
				crc_window = &tmp_window;

				tmp_window.windowa_x_start =
							acrtc->dm_irq_params.crc_window.x_start;
				tmp_window.windowa_y_start =
							acrtc->dm_irq_params.crc_window.y_start;
				tmp_window.windowa_x_end =
							acrtc->dm_irq_params.crc_window.x_end;
				tmp_window.windowa_y_end =
							acrtc->dm_irq_params.crc_window.y_end;
				tmp_window.windowb_x_start =
							acrtc->dm_irq_params.crc_window.x_start;
				tmp_window.windowb_y_start =
							acrtc->dm_irq_params.crc_window.y_start;
				tmp_window.windowb_x_end =
							acrtc->dm_irq_params.crc_window.x_end;
				tmp_window.windowb_y_end =
							acrtc->dm_irq_params.crc_window.y_end;

				dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc,
									stream_state, crc_window);

				acrtc->dm_irq_params.crc_window.update_win = false;

				dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1,
					&position.vertical_count,
					&position.nominal_vcount);

				v_blank = timing_out->v_total - timing_out->v_border_top -
					timing_out->v_addressable - timing_out->v_border_bottom;

				v_back_porch = v_blank - timing_out->v_front_porch -
					timing_out->v_sync_width;

				crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width;

				/* take 3 lines margin*/
				if ((position.vertical_count + 3) >= crc_window_latch_up_line)
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1;
				else
					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;

		/* prepare work for dmub to update ROI */
		crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
		if (!crc_fw_wrk)
			goto cleanup;

		INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
		crc_fw_wrk->dm = &adev->dm;
		crc_fw_wrk->stream = stream_state;
		crc_fw_wrk->roi.x_start = acrtc->dm_irq_params.window_param.roi.x_start;
		crc_fw_wrk->roi.y_start = acrtc->dm_irq_params.window_param.roi.y_start;
		crc_fw_wrk->roi.x_end = acrtc->dm_irq_params.window_param.roi.x_end;
		crc_fw_wrk->roi.y_end = acrtc->dm_irq_params.window_param.roi.y_end;
		schedule_work(&crc_fw_wrk->forward_roi_work);

		acrtc->dm_irq_params.window_param.update_win = false;
		acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;

	} else {
				if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) {
		if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
			acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
			goto cleanup;
		}

		if (adev->dm.crc_rd_wrk) {
			crc_rd_wrk = adev->dm.crc_rd_wrk;
			spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
						crc_rd_wrk->phy_inst =
							stream_state->link->link_enc_hw_inst;
			crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
			spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
			schedule_work(&crc_rd_wrk->notify_ta_work);
		}
				} else {
					acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1;
				}
			}
		}
	}

cleanup:
+12 −5
Original line number Diff line number Diff line
@@ -40,11 +40,8 @@ enum amdgpu_dm_pipe_crc_source {
};

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct crc_window_parm {
	uint16_t x_start;
	uint16_t y_start;
	uint16_t x_end;
	uint16_t y_end;
struct crc_window_param {
	struct crc_region roi;
	/* CRC window is activated or not */
	bool activated;
	/* Update crc window during vertical blank or not */
@@ -53,6 +50,7 @@ struct crc_window_parm {
	int skip_frame_cnt;
};

/* read_work for driver to call PSP to read */
struct crc_rd_work {
	struct work_struct notify_ta_work;
	/* To protect crc_rd_work carried fields*/
@@ -60,6 +58,15 @@ struct crc_rd_work {
	struct drm_crtc *crtc;
	uint8_t phy_inst;
};

/* forward_work for the driver to forward the CRC window (ROI) to dmub */
struct crc_fw_work {
	struct work_struct forward_roi_work;	/* handled by amdgpu_dm_forward_crc_window() */
	struct amdgpu_display_manager *dm;	/* provides dc and dc_lock for the forward call */
	struct dc_stream_state *stream;		/* stream whose CRC window is being updated */
	struct crc_region roi;			/* region of interest to program */
	bool is_stop_cmd;			/* true: stop ROI updates on this stream */
};
#endif

static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
+3 −0
Original line number Diff line number Diff line
@@ -127,6 +127,9 @@ static void vblank_control_worker(struct work_struct *work)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
			   !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
+33 −22
Original line number Diff line number Diff line
@@ -38,6 +38,10 @@
#include "link_hwss.h"
#include "dc/dc_dmub_srv.h"

#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
#include "amdgpu_dm_psr.h"
#endif

struct dmub_debugfs_trace_header {
	uint32_t entry_count;
	uint32_t reserved[3];
@@ -3081,8 +3085,8 @@ static int crc_win_x_start_set(void *data, u64 val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val;
	acrtc->dm_irq_params.crc_window.update_win = false;
	acrtc->dm_irq_params.window_param.roi.x_start = (uint16_t) val;
	acrtc->dm_irq_params.window_param.update_win = false;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3098,7 +3102,7 @@ static int crc_win_x_start_get(void *data, u64 *val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	*val = acrtc->dm_irq_params.crc_window.x_start;
	*val = acrtc->dm_irq_params.window_param.roi.x_start;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3118,8 +3122,8 @@ static int crc_win_y_start_set(void *data, u64 val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val;
	acrtc->dm_irq_params.crc_window.update_win = false;
	acrtc->dm_irq_params.window_param.roi.y_start = (uint16_t) val;
	acrtc->dm_irq_params.window_param.update_win = false;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3135,7 +3139,7 @@ static int crc_win_y_start_get(void *data, u64 *val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	*val = acrtc->dm_irq_params.crc_window.y_start;
	*val = acrtc->dm_irq_params.window_param.roi.y_start;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3154,8 +3158,8 @@ static int crc_win_x_end_set(void *data, u64 val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val;
	acrtc->dm_irq_params.crc_window.update_win = false;
	acrtc->dm_irq_params.window_param.roi.x_end = (uint16_t) val;
	acrtc->dm_irq_params.window_param.update_win = false;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3171,7 +3175,7 @@ static int crc_win_x_end_get(void *data, u64 *val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	*val = acrtc->dm_irq_params.crc_window.x_end;
	*val = acrtc->dm_irq_params.window_param.roi.x_end;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3190,8 +3194,8 @@ static int crc_win_y_end_set(void *data, u64 val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val;
	acrtc->dm_irq_params.crc_window.update_win = false;
	acrtc->dm_irq_params.window_param.roi.y_end = (uint16_t) val;
	acrtc->dm_irq_params.window_param.update_win = false;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3207,7 +3211,7 @@ static int crc_win_y_end_get(void *data, u64 *val)
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);

	spin_lock_irq(&drm_dev->event_lock);
	*val = acrtc->dm_irq_params.crc_window.y_end;
	*val = acrtc->dm_irq_params.window_param.roi.y_end;
	spin_unlock_irq(&drm_dev->event_lock);

	return 0;
@@ -3230,31 +3234,38 @@ static int crc_win_update_set(void *data, u64 val)
		return 0;

	if (val) {
		new_acrtc = to_amdgpu_crtc(new_crtc);
		mutex_lock(&adev->dm.dc_lock);
		/* PSR may write to OTG CRC window control register,
		 * so close it before starting secure_display.
		 */
		amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);

		spin_lock_irq(&adev_to_drm(adev)->event_lock);
		spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
		if (crc_rd_wrk->crtc) {
			old_crtc = crc_rd_wrk->crtc;
			old_acrtc = to_amdgpu_crtc(old_crtc);
		}
		new_acrtc = to_amdgpu_crtc(new_crtc);

		if (old_crtc && old_crtc != new_crtc) {
			old_acrtc->dm_irq_params.crc_window.activated = false;
			old_acrtc->dm_irq_params.crc_window.update_win = false;
			old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
			old_acrtc->dm_irq_params.window_param.activated = false;
			old_acrtc->dm_irq_params.window_param.update_win = false;
			old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;

			new_acrtc->dm_irq_params.crc_window.activated = true;
			new_acrtc->dm_irq_params.crc_window.update_win = true;
			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
			new_acrtc->dm_irq_params.window_param.activated = true;
			new_acrtc->dm_irq_params.window_param.update_win = true;
			new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
			crc_rd_wrk->crtc = new_crtc;
		} else {
			new_acrtc->dm_irq_params.crc_window.activated = true;
			new_acrtc->dm_irq_params.crc_window.update_win = true;
			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
			new_acrtc->dm_irq_params.window_param.activated = true;
			new_acrtc->dm_irq_params.window_param.update_win = true;
			new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
			crc_rd_wrk->crtc = new_crtc;
		}
		spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
		spin_unlock_irq(&adev_to_drm(adev)->event_lock);
		mutex_unlock(&adev->dm.dc_lock);
	}

	return 0;
Loading