Commit 00136429 authored by Jani Nikula
Browse files

drm/i915/wm: switch to intel_de_* register accessors in display code



Avoid direct uncore use in display code.

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/588815fc60752b6470ee4067246698d478309fa1.1670433372.git.jani.nikula@intel.com
parent 01f84f74
Loading
Loading
Loading
Loading
+18 −24
Original line number Diff line number Diff line
@@ -45,8 +45,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
	enum dbuf_slice slice;

	for_each_dbuf_slice(i915, slice) {
		if (intel_uncore_read(&i915->uncore,
				      DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
		if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
			enabled_slices |= BIT(slice);
	}

@@ -75,7 +74,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
	if (DISPLAY_VER(i915) >= 14) {
		u32 val;

		val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
		val = intel_de_read(i915, MTL_LATENCY_SAGV);

		return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
	} else if (DISPLAY_VER(i915) >= 12) {
@@ -756,18 +755,18 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,

	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
	if (plane_id == PLANE_CURSOR) {
		val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
		val = intel_de_read(i915, CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(ddb, val);
		return;
	}

	val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
	val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
	skl_ddb_entry_init_from_hw(ddb, val);

	if (DISPLAY_VER(i915) >= 11)
		return;

	val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
	val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
	skl_ddb_entry_init_from_hw(ddb_y, val);
}

@@ -2821,36 +2820,32 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,

		for (level = 0; level <= max_level; level++) {
			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
				val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
			else
				val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
				val = intel_de_read(i915, CUR_WM(pipe, level));

			skl_wm_level_from_reg_val(val, &wm->wm[level]);
		}

		if (plane_id != PLANE_CURSOR)
			val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
			val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
		else
			val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
			val = intel_de_read(i915, CUR_WM_TRANS(pipe));

		skl_wm_level_from_reg_val(val, &wm->trans_wm);

		if (HAS_HW_SAGV_WM(i915)) {
			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&i915->uncore,
							PLANE_WM_SAGV(pipe, plane_id));
				val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
			else
				val = intel_uncore_read(&i915->uncore,
							CUR_WM_SAGV(pipe));
				val = intel_de_read(i915, CUR_WM_SAGV(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.wm0);

			if (plane_id != PLANE_CURSOR)
				val = intel_uncore_read(&i915->uncore,
							PLANE_WM_SAGV_TRANS(pipe, plane_id));
				val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
			else
				val = intel_uncore_read(&i915->uncore,
							CUR_WM_SAGV_TRANS(pipe));
				val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));

			skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
		} else if (DISPLAY_VER(i915) >= 12) {
@@ -3126,7 +3121,7 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915)
	if (!HAS_IPC(i915))
		return;

	intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
	intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
		     skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
}

@@ -3201,19 +3196,18 @@ adjust_wm_latency(struct drm_i915_private *i915,

static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
	struct intel_uncore *uncore = &i915->uncore;
	int max_level = ilk_wm_max_level(i915);
	u32 val;

	val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
	val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
	wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
	val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
	wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);

	val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
	val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
	wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
	wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);