Commit ce7c3bde authored by Dave Airlie
Browse files

Merge tag 'drm-intel-next-2021-01-29' of git://anongit.freedesktop.org/drm/drm-intel into drm-next



- WARN if plane src coords are too big (Ville)
- Prevent double YUV range correction on HDR planes (Andres)
- DP MST related Fixes (Sean, Imre)
- More clean-up around DRAM detection code (Jose)
- Actually enable async flips for all ilk+ platforms (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210129225328.GA1041349@intel.com
parents c106c5e2 3b7bbb36
Loading
Loading
Loading
Loading
+18 −6
Original line number Diff line number Diff line
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
	return 0;
}

/**
 * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
 * @link_rate: link rate in 10kbits/s units
 * @link_lane_count: lane count
 *
 * Calculate the total bandwidth of a MultiStream Transport link. The returned
 * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
 * convert the number of PBNs required for a given stream to the number of
 * timeslots this stream requires in each MTP.
 */
int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
	/* Warn (but don't fail) on bogus link parameters; the result is 0. */
	if (link_rate == 0 || link_lane_count == 0)
		DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
			      link_rate, link_lane_count);

	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
	return link_rate * link_lane_count / 54000;
}
EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);

/**
 * drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
			goto out_unlock;
		}

		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
		mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
		if (mgr->pbn_div == 0) {
			ret = -EINVAL;
+204 −9
Original line number Diff line number Diff line
@@ -276,6 +276,13 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
		}
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		drm_WARN_ON(&dev_priv->drm, src_x > 8191 || src_y > 4095);
	} else if (INTEL_GEN(dev_priv) >= 4 &&
		   fb->modifier == I915_FORMAT_MOD_X_TILED) {
		drm_WARN_ON(&dev_priv->drm, src_x > 4095 || src_y > 4095);
	}

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = src_x;
	plane_state->color_plane[0].y = src_y;
@@ -488,6 +495,129 @@ static void i9xx_disable_plane(struct intel_plane *plane,
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * Arm a flip of the primary plane by rewriting DSPCNTR (with the
 * async-flip bit set when requested) followed by DSPSURF. Used for
 * all ilk+ platforms except VLV/CHV (see intel_primary_plane_create()).
 */
static void
g4x_primary_async_flip(struct intel_plane *plane,
		       const struct intel_crtc_state *crtc_state,
		       const struct intel_plane_state *plane_state,
		       bool async_flip)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
	u32 dspaddr_offset = plane_state->color_plane[0].offset;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	/* Request an async (tearing) flip for this update only. */
	if (async_flip)
		dspcntr |= DISPPLANE_ASYNC_FLIP;

	/*
	 * Both writes happen under the uncore lock; DSPCNTR is written
	 * first so the async-flip mode is in place before the DSPSURF
	 * write (which presumably latches the flip — TODO confirm).
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
	intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
			  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * Arm a flip of the primary plane on VLV/CHV by writing only the
 * surface address register. @async_flip is intentionally unused here;
 * presumably no DSPCNTR update is needed on these platforms — confirm
 * against Bspec.
 */
static void
vlv_primary_async_flip(struct intel_plane *plane,
		       const struct intel_crtc_state *crtc_state,
		       const struct intel_plane_state *plane_state,
		       bool async_flip)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	u32 dspaddr_offset = plane_state->color_plane[0].offset;
	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
	unsigned long irqflags;

	/* Register write is serialized via the uncore lock. */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	intel_de_write_fw(dev_priv, DSPADDR_VLV(i9xx_plane),
			  intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* Unmask the BDW primary plane flip-done interrupt for this plane's pipe. */
static void
bdw_primary_enable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
	spin_unlock_irq(&i915->irq_lock);
}

/* Mask the BDW primary plane flip-done interrupt for this plane's pipe. */
static void
bdw_primary_disable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE);
	spin_unlock_irq(&i915->irq_lock);
}

/* Unmask the IVB-style primary plane flip-done display interrupt. */
static void
ivb_primary_enable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
	spin_unlock_irq(&i915->irq_lock);
}

/* Mask the IVB-style primary plane flip-done display interrupt. */
static void
ivb_primary_disable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane));
	spin_unlock_irq(&i915->irq_lock);
}

/* Unmask the ILK-style primary plane flip-done display interrupt. */
static void
ilk_primary_enable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
	spin_unlock_irq(&i915->irq_lock);
}

/* Mask the ILK-style primary plane flip-done display interrupt. */
static void
ilk_primary_disable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);

	/* irq_lock serializes interrupt-mask updates. */
	spin_lock_irq(&i915->irq_lock);
	ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane));
	spin_unlock_irq(&i915->irq_lock);
}

/* Enable the VLV/CHV flip-done pipestat interrupt for this plane's pipe. */
static void
vlv_primary_enable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	/* irq_lock serializes pipestat updates. */
	spin_lock_irq(&i915->irq_lock);
	i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
	spin_unlock_irq(&i915->irq_lock);
}

/* Disable the VLV/CHV flip-done pipestat interrupt for this plane's pipe. */
static void
vlv_primary_disable_flip_done(struct intel_plane *plane)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe = plane->pipe;

	/* irq_lock serializes pipestat updates. */
	spin_lock_irq(&i915->irq_lock);
	i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV);
	spin_unlock_irq(&i915->irq_lock);
}

static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
				    enum pipe *pipe)
{
@@ -523,21 +653,56 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
	return ret;
}

/*
 * Max stride (in bytes) for the HSW/BDW primary plane for the given
 * pixel format/modifier/rotation. Limited to 8k pixels so OFFSET.x
 * can't get too big; hard-capped at 32k bytes.
 */
static unsigned int
hsw_primary_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	int cpp = info->cpp[0];

	/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
	return min(8192 * cpp, 32 * 1024);
}

/*
 * Max stride (in bytes) for the ILK..IVB primary plane (non-GMCH
 * platforms other than HSW/BDW; see intel_primary_plane_create()).
 */
static unsigned int
ilk_primary_max_stride(struct intel_plane *plane,
		       u32 pixel_format, u64 modifier,
		       unsigned int rotation)
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	int cpp = info->cpp[0];

	/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
	if (modifier == I915_FORMAT_MOD_X_TILED)
		return min(4096 * cpp, 32 * 1024);
	else
		return 32 * 1024;
}

/*
 * Max stride (in bytes) for gen4+ GMCH planes. Exported (non-static)
 * because it is also declared in the corresponding header.
 */
unsigned int
i965_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	int cpp = info->cpp[0];

	/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
	if (modifier == I915_FORMAT_MOD_X_TILED)
		return min(4096 * cpp, 16 * 1024);
	else
		return 32 * 1024;
}

static unsigned int
i9xx_plane_max_stride(struct intel_plane *plane,
		      u32 pixel_format, u64 modifier,
		      unsigned int rotation)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	if (INTEL_GEN(dev_priv) >= 3) {
		if (modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
@@ -649,12 +814,42 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
	else
		plane->min_cdclk = i9xx_plane_min_cdclk;

	if (HAS_GMCH(dev_priv)) {
		if (INTEL_GEN(dev_priv) >= 4)
			plane->max_stride = i965_plane_max_stride;
		else
			plane->max_stride = i9xx_plane_max_stride;
	} else {
		if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
			plane->max_stride = hsw_primary_max_stride;
		else
			plane->max_stride = ilk_primary_max_stride;
	}

	plane->update_plane = i9xx_update_plane;
	plane->disable_plane = i9xx_disable_plane;
	plane->get_hw_state = i9xx_plane_get_hw_state;
	plane->check_plane = i9xx_plane_check;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		plane->async_flip = vlv_primary_async_flip;
		plane->enable_flip_done = vlv_primary_enable_flip_done;
		plane->disable_flip_done = vlv_primary_disable_flip_done;
	} else if (IS_BROADWELL(dev_priv)) {
		plane->need_async_flip_disable_wa = true;
		plane->async_flip = g4x_primary_async_flip;
		plane->enable_flip_done = bdw_primary_enable_flip_done;
		plane->disable_flip_done = bdw_primary_disable_flip_done;
	} else if (INTEL_GEN(dev_priv) >= 7) {
		plane->async_flip = g4x_primary_async_flip;
		plane->enable_flip_done = ivb_primary_enable_flip_done;
		plane->disable_flip_done = ivb_primary_disable_flip_done;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		plane->async_flip = g4x_primary_async_flip;
		plane->enable_flip_done = ilk_primary_enable_flip_done;
		plane->disable_flip_done = ilk_primary_disable_flip_done;
	}

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
					       0, plane_funcs,
+1 −1
Original line number Diff line number Diff line
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_plane;
struct intel_plane_state;

unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
unsigned int i965_plane_max_stride(struct intel_plane *plane,
				   u32 pixel_format, u64 modifier,
				   unsigned int rotation);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+8 −72
Original line number Diff line number Diff line
@@ -20,76 +20,9 @@ struct intel_qgv_point {
/*
 * QGV (quality-of-service/grade-of-voltage) point table read from pcode.
 * DRAM type and channel count now live in dev_priv->dram_info, so only
 * the per-point data, point count and burst length are kept here.
 */
struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	u8 num_points;	/* number of valid entries in points[] */
	u8 t_bl;	/* burst length; depends on gen and DRAM type */
};

/*
 * Query the memory subsystem global info from the pcode mailbox and
 * decode the DRAM type, channel count, QGV point count and burst
 * length (t_bl) into @qi.
 *
 * Returns 0 on success, or the error from sandybridge_pcode_read().
 */
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
					  struct intel_qgv_info *qi)
{
	u32 val = 0;
	int ret;

	ret = sandybridge_pcode_read(dev_priv,
				     ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
				     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO,
				     &val, NULL);
	if (ret)
		return ret;

	/*
	 * Bits 3:0 encode the DRAM type; the encoding differs between
	 * gen12 and gen11.
	 */
	if (IS_GEN(dev_priv, 12)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 5:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else if (IS_GEN(dev_priv, 11)) {
		switch (val & 0xf) {
		case 0:
			qi->dram_type = INTEL_DRAM_DDR4;
			break;
		case 1:
			qi->dram_type = INTEL_DRAM_DDR3;
			break;
		case 2:
			qi->dram_type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			qi->dram_type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			break;
		}
	} else {
		/* Unknown gen: warn and fall back to a conservative type. */
		MISSING_CASE(INTEL_GEN(dev_priv));
		qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */
	}

	/* Bits 7:4 = channel count, bits 11:8 = number of QGV points. */
	qi->num_channels = (val & 0xf0) >> 4;
	qi->num_points = (val & 0xf00) >> 8;

	/* Burst length depends on gen and on whether the DRAM is DDR4. */
	if (IS_GEN(dev_priv, 12))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8;

	return 0;
}

static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
					 struct intel_qgv_point *sp,
					 int point)
@@ -139,11 +72,15 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
			      struct intel_qgv_info *qi)
{
	const struct dram_info *dram_info = &dev_priv->dram_info;
	int i, ret;

	ret = icl_pcode_read_mem_global_info(dev_priv, qi);
	if (ret)
		return ret;
	qi->num_points = dram_info->num_qgv_points;

	if (IS_GEN(dev_priv, 12))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 16;
	else if (IS_GEN(dev_priv, 11))
		qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8;

	if (drm_WARN_ON(&dev_priv->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
@@ -209,7 +146,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels;
	int num_channels = dev_priv->dram_info.num_channels;
	int deinterleave;
	int ipqdepth, ipqdepthpch;
	int dclk_max;
@@ -222,7 +159,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}
	num_channels = qi.num_channels;

	deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
	dclk_max = icl_sagv_max_dclk(&qi);
+13 −3
Original line number Diff line number Diff line
@@ -1317,6 +1317,11 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
		return 0;
}

/*
 * Async page flips are supported on ilk (gen5) and newer. Used both for
 * the X-tiled surface alignment choice in intel_surf_alignment() and to
 * set mode_config.async_page_flip in intel_mode_config_init().
 */
static bool has_async_flips(struct drm_i915_private *i915)
{
	return INTEL_GEN(i915) >= 5;
}

static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
					 int color_plane)
{
@@ -1331,7 +1336,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_GEN(dev_priv) >= 9)
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
@@ -3086,6 +3091,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
		}
	}

	drm_WARN_ON(&dev_priv->drm, x > 8191 || y > 8191);

	plane_state->color_plane[0].offset = offset;
	plane_state->color_plane[0].x = x;
	plane_state->color_plane[0].y = y;
@@ -3158,6 +3165,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
		}
	}

	drm_WARN_ON(&i915->drm, x > 8191 || y > 8191);

	plane_state->color_plane[uv_plane].offset = offset;
	plane_state->color_plane[uv_plane].x = x;
	plane_state->color_plane[uv_plane].y = y;
@@ -3577,6 +3586,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	} else if (fb->format->is_yuv) {
		plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
		if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
			plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
	}

	return plane_color_ctl;
@@ -14771,8 +14782,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)

	mode_config->funcs = &intel_mode_funcs;

	if (INTEL_GEN(i915) >= 9)
		mode_config->async_page_flip = true;
	mode_config->async_page_flip = has_async_flips(i915);

	/*
	 * Maximum framebuffer dimensions, chosen to match
Loading