Commit beaa71d6 authored by Linus Torvalds
Browse files

Merge tag 'drm-fixes-2023-08-25' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "A bit bigger than I'd care for, but it's mostly a single vmwgfx fix
  and a fix for an i915 hotplug probing. Otherwise misc i915, bridge,
  panfrost and dma-buf fixes.

  core:
   - add a HPD poll helper

  i915:
   - fix regression in i915 polling
   - fix docs build warning
   - fix DG2 idle power consumption

  bridge:
   - samsung-dsim: init fix

  panfrost:
   - fix speed binning issue

  dma-buf:
   - fix recursive lock in fence signal

  vmwgfx:
   - fix shader stage validation
   - fix NULL ptr derefs in gem put"

* tag 'drm-fixes-2023-08-25' of git://anongit.freedesktop.org/drm/drm:
  drm/i915: Fix HPD polling, reenabling the output poll work as needed
  drm: Add an HPD poll helper to reschedule the poll work
  drm/vmwgfx: Fix possible invalid drm gem put calls
  drm/vmwgfx: Fix shader stage validation
  dma-buf/sw_sync: Avoid recursive lock during fence signal
  drm/i915: fix Sphinx indentation warning
  drm/i915/dgfx: Enable d3cold at s2idle
  drm/display/dp: Fix the DP DSC Receiver cap size
  drm/panfrost: Skip speed binning on EOPNOTSUPP
  drm: bridge: samsung-dsim: Fix init during host transfer
parents 4f9e7fab 59fe2029
Loading
Loading
Loading
Loading
+9 −9
Original line number Diff line number Diff line
@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
 */
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	LIST_HEAD(signalled);
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
		if (!timeline_fence_signaled(&pt->base))
			break;

		list_del_init(&pt->link);
		dma_fence_get(&pt->base);

		list_move_tail(&pt->link, &signalled);
		rb_erase(&pt->node, &obj->pt_tree);

		/*
		 * A signal callback may release the last reference to this
		 * fence, causing it to be freed. That operation has to be
		 * last to avoid a use after free inside this loop, and must
		 * be after we remove the fence from the timeline in order to
		 * prevent deadlocking on timeline->lock inside
		 * timeline_fence_release().
		 */
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);

	list_for_each_entry_safe(pt, next, &signalled, link) {
		list_del_init(&pt->link);
		dma_fence_put(&pt->base);
	}
}

/**
+17 −10
Original line number Diff line number Diff line
@@ -1386,6 +1386,18 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
	disable_irq(dsi->irq);
}

/*
 * samsung_dsim_set_stop_state - force or release the DSI link stop state
 * @dsi: DSIM controller context
 * @enable: true to assert DSIM_FORCE_STOP_STATE, false to clear it
 *
 * Read-modify-write of the escape-mode register so that only the
 * force-stop bit is changed; all other DSIM_ESCMODE_REG bits are
 * preserved.
 */
static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
{
	u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);

	if (enable)
		reg |= DSIM_FORCE_STOP_STATE;
	else
		reg &= ~DSIM_FORCE_STOP_STATE;

	samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
}

static int samsung_dsim_init(struct samsung_dsim *dsi)
{
	const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1445,15 +1457,12 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
				       struct drm_bridge_state *old_bridge_state)
{
	struct samsung_dsim *dsi = bridge_to_dsi(bridge);
	u32 reg;

	if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
		samsung_dsim_set_display_mode(dsi);
		samsung_dsim_set_display_enable(dsi, true);
	} else {
		reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
		reg &= ~DSIM_FORCE_STOP_STATE;
		samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
		samsung_dsim_set_stop_state(dsi, false);
	}

	dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
@@ -1463,16 +1472,12 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
					struct drm_bridge_state *old_bridge_state)
{
	struct samsung_dsim *dsi = bridge_to_dsi(bridge);
	u32 reg;

	if (!(dsi->state & DSIM_STATE_ENABLED))
		return;

	if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
		reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
		reg |= DSIM_FORCE_STOP_STATE;
		samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
	}
	if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
		samsung_dsim_set_stop_state(dsi, true);

	dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
}
@@ -1775,6 +1780,8 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
	if (ret)
		return ret;

	samsung_dsim_set_stop_state(dsi, false);

	ret = mipi_dsi_create_packet(&xfer.packet, msg);
	if (ret < 0)
		return ret;
+46 −22
Original line number Diff line number Diff line
@@ -262,6 +262,26 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
}

#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/*
 * reschedule_output_poll_work - (re-)queue the connector output poll work
 * @dev: drm_device whose poll work should be scheduled
 *
 * Queues mode_config.output_poll_work with the regular
 * DRM_OUTPUT_POLL_PERIOD delay, shortened to 1s when an initial
 * delayed event is pending (see FIXME below). Callers are expected to
 * have decided already that polling should run; this helper only picks
 * the delay and schedules the work.
 */
static void reschedule_output_poll_work(struct drm_device *dev)
{
	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;

	if (dev->mode_config.delayed_event)
		/*
		 * FIXME:
		 *
		 * Use short (1s) delay to handle the initial delayed event.
		 * This delay should not be needed, but Optimus/nouveau will
		 * fail in a mysterious way if the delayed event is handled as
		 * soon as possible like it is done in
		 * drm_helper_probe_single_connector_modes() in case the poll
		 * was enabled before.
		 */
		delay = HZ;

	schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
}

/**
 * drm_kms_helper_poll_enable - re-enable output polling.
 * @dev: drm_device
@@ -279,37 +299,41 @@ static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
 */
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
	bool poll = false;
	unsigned long delay = DRM_OUTPUT_POLL_PERIOD;

	if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
	    dev->mode_config.poll_running)
		return;

	poll = drm_kms_helper_enable_hpd(dev);

	if (dev->mode_config.delayed_event) {
		/*
		 * FIXME:
		 *
		 * Use short (1s) delay to handle the initial delayed event.
		 * This delay should not be needed, but Optimus/nouveau will
		 * fail in a mysterious way if the delayed event is handled as
		 * soon as possible like it is done in
		 * drm_helper_probe_single_connector_modes() in case the poll
		 * was enabled before.
		 */
		poll = true;
		delay = HZ;
	}

	if (poll)
		schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
	if (drm_kms_helper_enable_hpd(dev) ||
	    dev->mode_config.delayed_event)
		reschedule_output_poll_work(dev);

	dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);

/**
 * drm_kms_helper_poll_reschedule - reschedule the output polling work
 * @dev: drm_device
 *
 * This function reschedules the output polling work, after polling for a
 * connector has been enabled.
 *
 * Drivers must call this helper after enabling polling for a connector by
 * setting %DRM_CONNECTOR_POLL_CONNECT / %DRM_CONNECTOR_POLL_DISCONNECT flags
 * in drm_connector::polled. Note that disabling polling by clearing these
 * flags for a connector will stop the output polling work automatically if
 * the polling is disabled for all other connectors as well.
 *
 * The function can be called only after polling has been enabled by calling
 * drm_kms_helper_poll_init() / drm_kms_helper_poll_enable().
 */
void drm_kms_helper_poll_reschedule(struct drm_device *dev)
{
	/* No-op unless drm_kms_helper_poll_enable() has started polling. */
	if (dev->mode_config.poll_running)
		reschedule_output_poll_work(dev);
}
EXPORT_SYMBOL(drm_kms_helper_poll_reschedule);

static enum drm_connector_status
drm_helper_probe_detect_ctx(struct drm_connector *connector, bool force)
{
+2 −2
Original line number Diff line number Diff line
@@ -211,7 +211,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)

	/* Enable polling and queue hotplug re-enabling. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(&dev_priv->drm);
		drm_kms_helper_poll_reschedule(&dev_priv->drm);
		mod_delayed_work(dev_priv->unordered_wq,
				 &dev_priv->display.hotplug.reenable_work,
				 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
@@ -649,7 +649,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
	drm_connector_list_iter_end(&conn_iter);

	if (enabled)
		drm_kms_helper_poll_enable(&dev_priv->drm);
		drm_kms_helper_poll_reschedule(&dev_priv->drm);

	mutex_unlock(&dev_priv->drm.mode_config.mutex);

+2 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication. This is done differently depending
 * on the platform:
 *
 * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
 *   and the authentication via GuC
 * - DG2: load and authentication are both performed via GSC.
@@ -33,6 +34,7 @@
 *   not-DG2 older platforms), while the authentication is done in 2-steps,
 *   a first auth for clear-media workloads via GuC and a second one for all
 *   workloads via GSC.
 *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
Loading