Commit 8e04cddf authored by Dave Airlie
Browse files

Merge tag 'drm-intel-next-2023-06-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-next



drm/i915 feature pull #2 for v6.5:

Features and functionality:
- Meteorlake PM demand (Vinod, Mika)
- Switch to dedicated workqueues to stop using flush_scheduled_work() (Luca)

Refactoring and cleanups:
- Move display runtime init under display/ (Matt)
- Async flip error message clarifications (Arun)

Fixes:
- Remove 10bit gamma on desktop gen3 parts, they don't support it (Ville)
- Fix driver probe error handling if driver creation fails (Matt)
- Fix all -Wunused-but-set-variable warnings, and enable it for i915 (Jani)
- Stop using edid_blob_ptr (Jani)
- Fix log level for "CDS interlane align done" (Khaled)
- Fix an unnecessary include prefix (Matt)

Merges:
- Backmerge drm-next to sync with drm-intel-gt-next (Jani)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87o7lnpxz2.fsf@intel.com
parents 901bdf5e 69f06e4f
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@ subdir-ccflags-y += -Wno-type-limits
subdir-ccflags-y += -Wno-missing-field-initializers
subdir-ccflags-y += -Wno-sign-compare
subdir-ccflags-y += -Wno-shift-negative-value
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror

@@ -269,6 +269,7 @@ i915-y += \
	display/intel_pch_display.o \
	display/intel_pch_refclk.o \
	display/intel_plane_initial.o \
	display/intel_pmdemand.o \
	display/intel_psr.o \
	display/intel_quirks.o \
	display/intel_sprite.o \
+3 −0
Original line number Diff line number Diff line
@@ -1033,10 +1033,13 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
					       DSPLINOFF(i9xx_plane));
		base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK;
	} else {
		offset = 0;
		base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
	}
	plane_config->base = base;

	drm_WARN_ON(&dev_priv->drm, offset != 0);

	val = intel_de_read(dev_priv, PIPESRC(pipe));
	fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1;
	fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1;
+1 −1
Original line number Diff line number Diff line
@@ -722,7 +722,7 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct intel_plane_state *plane_state;
	struct intel_plane_state __maybe_unused *plane_state;
	struct intel_plane *plane;
	int i;

+228 −117
Original line number Diff line number Diff line
@@ -182,7 +182,7 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv,
	val2 = intel_uncore_read(&dev_priv->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	sp->dclk = DIV_ROUND_UP((16667 * dclk), 1000);
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

@@ -379,7 +379,7 @@ static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
	.derating = 10,
};

static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -534,10 +534,14 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
							  num_channels *
							  qi.channel_width, 8);

			drm_dbg_kms(&dev_priv->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
				    i, j, bi->num_planes, bi->deratedbw[j],
				    bi->peakbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
@@ -589,7 +593,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915)
	i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED;
}

static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv,
				     int num_planes, int qgv_point)
{
	int i;
@@ -611,13 +615,13 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return bi->deratedbw[qgv_point];
			return i;
	}

	return 0;
	return UINT_MAX;
}

static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv,
				     int num_planes, int qgv_point)
{
	int i;
@@ -639,10 +643,10 @@ static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv,
			return UINT_MAX;

		if (num_planes <= bi->num_planes)
			return bi->deratedbw[qgv_point];
			return i;
	}

	return dev_priv->display.bw.max[0].deratedbw[qgv_point];
	return 0;
}

static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
@@ -799,6 +803,210 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
	return to_intel_bw_state(bw_state);
}

/*
 * mtl_find_qgv_points - select the QGV point peak bandwidth for MTL+ PM demand
 * @i915: device private
 * @data_rate: required display data rate (already scaled down by the caller)
 * @num_active_planes: number of currently active planes
 * @new_bw_state: bw state to fill in; qgv_point_peakbw is written here
 *
 * Picks the QGV point whose derated bandwidth exceeds @data_rate by the
 * smallest margin and stores its peak bandwidth (in multiples of 100 mbps,
 * as PM demand expects) in @new_bw_state. Returns 0 on success, a negative
 * error code if the global state lock cannot be taken or no QGV point can
 * satisfy @data_rate.
 */
static int mtl_find_qgv_points(struct drm_i915_private *i915,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int best_rate = UINT_MAX;
	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
	unsigned int qgv_peak_bw = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/*
	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
	 * for qgv peak bw in PM Demand request. So assign U16_MAX if SAGV is
	 * not enabled. PM Demand code will clamp the value for the register
	 * (qgv_point_peakbw is a u16, so U16_MAX is the all-ones value here,
	 * not UINT_MAX).
	 */
	if (!intel_can_enable_sagv(i915, new_bw_state)) {
		new_bw_state->qgv_point_peakbw = U16_MAX;
		drm_dbg_kms(&i915->drm, "No SAGV, use U16_MAX as peak bw.");
		return 0;
	}

	/*
	 * Find the best QGV point by comparing the data_rate with max data rate
	 * offered per plane group
	 */
	for (i = 0; i < num_qgv_points; i++) {
		unsigned int bw_index =
			tgl_max_bw_index(i915, num_active_planes, i);
		unsigned int max_data_rate;

		/* tgl_max_bw_index() returns UINT_MAX when no table matched */
		if (bw_index >= ARRAY_SIZE(i915->display.bw.max))
			continue;

		max_data_rate = i915->display.bw.max[bw_index].deratedbw[i];

		if (max_data_rate < data_rate)
			continue;

		/* keep the point with the smallest bandwidth surplus */
		if (max_data_rate - data_rate < best_rate) {
			best_rate = max_data_rate - data_rate;
			qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i];
		}

		drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
			    i, max_data_rate, data_rate, qgv_peak_bw);
	}

	drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
		    qgv_peak_bw, data_rate);

	/*
	 * The display configuration cannot be supported if no QGV point
	 * satisfying the required data rate is found
	 */
	if (qgv_peak_bw == 0) {
		drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);

	return 0;
}

/*
 * icl_find_qgv_points - compute the QGV/PSF GV point restriction mask (pre-MTL)
 * @i915: device private
 * @data_rate: required display data rate (already scaled down by the caller)
 * @num_active_planes: number of currently active planes
 * @old_bw_state: previous bw state, used to detect mask changes
 * @new_bw_state: bw state to fill in; qgv_points_mask is written here
 *
 * Builds the mask of QGV (and, where present, PSF GV) points that must be
 * disallowed for the current display configuration, storing it in
 * @new_bw_state->qgv_points_mask (PCode takes the points-to-mask form).
 * Returns 0 on success, a negative error code if the global state lock or
 * serialization fails, or -EINVAL if no point can satisfy @data_rate.
 */
static int icl_find_qgv_points(struct drm_i915_private *i915,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       const struct intel_bw_state *old_bw_state,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int max_bw_point = 0;
	unsigned int max_bw = 0;
	unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
	u16 psf_points = 0;
	u16 qgv_points = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/* Collect every QGV point whose derated bandwidth covers data_rate. */
	for (i = 0; i < num_qgv_points; i++) {
		unsigned int idx;
		unsigned int max_data_rate;

		/* TGL+ matches num_planes from below, ICL from above. */
		if (DISPLAY_VER(i915) > 11)
			idx = tgl_max_bw_index(i915, num_active_planes, i);
		else
			idx = icl_max_bw_index(i915, num_active_planes, i);

		/* the *_max_bw_index() helpers return UINT_MAX on no match */
		if (idx >= ARRAY_SIZE(i915->display.bw.max))
			continue;

		max_data_rate = i915->display.bw.max[idx].deratedbw[i];

		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as it is calculated earlier in
		 * intel_atomic_check,
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	/* Same selection for PSF GV points (ADL-S and friends). */
	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(i915, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't - simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only single point with highest bandwidth, if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(i915, new_bw_state)) {
		qgv_points = BIT(max_bw_point);
		drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask =
		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
		icl_qgv_points_mask(i915);

	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized(in case this is a nomodeset, nonblocking)
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * intel_bw_check_qgv_points - dispatch QGV point validation per platform
 * @i915: device private
 * @old_bw_state: previous bw state (only consumed by the pre-MTL path)
 * @new_bw_state: bw state being checked and filled in
 *
 * Derives the required data rate and active plane count from the new bw
 * state, scales the rate down by 1000 (rounding up), and hands off to the
 * MTL+ (display ver >= 14) or ICL/TGL helper. Returns whatever the chosen
 * helper returns.
 */
static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int planes = intel_bw_num_active_planes(i915, new_bw_state);
	unsigned int rate =
		DIV_ROUND_UP(intel_bw_data_rate(i915, new_bw_state), 1000);

	if (DISPLAY_VER(i915) < 14)
		return icl_find_qgv_points(i915, rate, planes,
					   old_bw_state, new_bw_state);

	return mtl_find_qgv_points(i915, rate, planes, new_bw_state);
}

static bool intel_bw_state_changed(struct drm_i915_private *i915,
				   const struct intel_bw_state *old_bw_state,
				   const struct intel_bw_state *new_bw_state)
@@ -1045,20 +1253,14 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	unsigned int data_rate;
	unsigned int num_active_planes;
	int i, ret;
	u16 qgv_points = 0, psf_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
	unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
	bool changed = false;
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	int ret;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(dev_priv) < 11)
	if (DISPLAY_VER(i915) < 11)
		return 0;

	ret = intel_bw_check_data_rate(state, &changed);
@@ -1069,8 +1271,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (new_bw_state &&
	    intel_can_enable_sagv(dev_priv, old_bw_state) !=
	    intel_can_enable_sagv(dev_priv, new_bw_state))
	    intel_can_enable_sagv(i915, old_bw_state) !=
	    intel_can_enable_sagv(i915, new_bw_state))
		changed = true;

	/*
@@ -1080,100 +1282,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
	if (!changed)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		if (DISPLAY_VER(dev_priv) > 11)
			max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
		else
			max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as it is calculated earlier in
		 * intel_atomic_check,
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(dev_priv, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't - simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only single point with highest bandwidth, if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		qgv_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask =
		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
		icl_qgv_points_mask(dev_priv);

	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized(in case this is a nomodeset, nonblocking)
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
	ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
	if (ret)
		return ret;
	}

	return 0;
}
+6 −0
Original line number Diff line number Diff line
@@ -34,6 +34,12 @@ struct intel_bw_state {
	/* bitmask of active pipes */
	u8 active_pipes;

	/*
	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
	 * the selected QGV point as the parameter in multiples of 100MB/s
	 */
	u16 qgv_point_peakbw;

	/*
	 * Current QGV points mask, which restricts
	 * some particular SAGV states, not to confuse
Loading