Commit 8b419482 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'drm-fixes-2023-01-01' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "I'm just back from the mountains, and Dave is out at the beach and
  should be back in a week again. Just i915 fixes and since Rodrigo
  bothered to make the pull last week I figured I should warm up gpg and
  forward this in a nice signed tag as a new years present!

   - i915 fixes for newer platforms

   - i915 locking rework to not give up in vm eviction fallback path too
     early"

* tag 'drm-fixes-2023-01-01' of git://anongit.freedesktop.org/drm/drm:
  drm/i915/dsi: fix MIPI_BKLT_EN_1 native GPIO index
  drm/i915/dsi: add support for ICL+ native MIPI GPIO sequence
  drm/i915/uc: Fix two issues with over-size firmware files
  drm/i915: improve the catch-all evict to handle lock contention
  drm/i915: Remove __maybe_unused from mtl_info
  drm/i915: fix TLB invalidation for Gen12.50 video and compute engines
parents e4cf7c25 a9f5a752
Loading
Loading
Loading
Loading
+91 −3
Original line number Diff line number Diff line
@@ -41,9 +41,11 @@

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_gmbus_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_regs.h"
#include "vlv_sideband.h"
@@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
	drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}

/*
 * Indices of the ICL+ "native" GPIOs driven by the VBT MIPI sequences
 * (see icl_native_gpio_set_value()). The *_1 entries address the first
 * DSI panel and the *_2 entries a second one; icl_native_gpio_set_value()
 * rejects the *_2 set on display version 11, so presumably only newer
 * platforms expose the second pin set — confirm against the VBT spec.
 * The values form the index space handed in by the VBT GPIO element,
 * so their order and starting value must not change.
 */
enum {
	MIPI_RESET_1 = 0,
	MIPI_AVDD_EN_1,
	MIPI_BKLT_EN_1,
	MIPI_AVEE_EN_1,
	MIPI_VIO_EN_1,
	MIPI_RESET_2,
	MIPI_AVDD_EN_2,
	MIPI_BKLT_EN_2,
	MIPI_AVEE_EN_2,
	MIPI_VIO_EN_2,
};

/*
 * Set the logical value of one ICL+ "native" MIPI GPIO.
 *
 * These are not pins of a uniform GPIO bank: depending on the index the
 * value is driven through the HPD pin hardware (MIPI_RESET_*), the panel
 * power sequencer's PP_CONTROL bits (MIPI_AVDD_EN_* / MIPI_BKLT_EN_*),
 * or the GMBUS GPIO clock/data pins (MIPI_AVEE_EN_* / MIPI_VIO_EN_*).
 */
static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
				      int gpio, bool value)
{
	int index;

	/* Only the first pin set (MIPI_*_1) exists on display version 11. */
	if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
		return;

	switch (gpio) {
	case MIPI_RESET_1:
	case MIPI_RESET_2:
		index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;

		/*
		 * Disable HPD to set the pin to output, and set output
		 * value. The HPD pin should not be enabled for DSI anyway,
		 * assuming the board design and VBT are sane, and the pin isn't
		 * used by a non-DSI encoder.
		 *
		 * The locking protects against concurrent SHOTPLUG_CTL_DDI
		 * modifications in irq setup and handling.
		 */
		spin_lock_irq(&dev_priv->irq_lock);
		intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
			     SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
			     SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
			     value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
		spin_unlock_irq(&dev_priv->irq_lock);
		break;
	case MIPI_AVDD_EN_1:
	case MIPI_AVDD_EN_2:
		/* AVDD is wired to the PPS panel power on control. */
		index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;

		intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
			     value ? PANEL_POWER_ON : 0);
		break;
	case MIPI_BKLT_EN_1:
	case MIPI_BKLT_EN_2:
		/* Backlight enable is wired to the PPS backlight control. */
		index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;

		intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
			     value ? EDP_BLC_ENABLE : 0);
		break;
	case MIPI_AVEE_EN_1:
	case MIPI_AVEE_EN_2:
		/*
		 * Drive the GMBUS GPIO clock line as an output with the
		 * requested value; the *_MASK bits presumably latch the
		 * direction/value writes — confirm against bspec.
		 */
		index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;

		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
			     GPIO_CLOCK_VAL_OUT,
			     GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
			     GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
		break;
	case MIPI_VIO_EN_1:
	case MIPI_VIO_EN_2:
		/* As above, but on the GMBUS GPIO data line. */
		index = gpio == MIPI_VIO_EN_1 ? 1 : 2;

		intel_de_rmw(dev_priv, GPIO(dev_priv, index),
			     GPIO_DATA_VAL_OUT,
			     GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
			     GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
		break;
	default:
		MISSING_CASE(gpio);
	}
}

static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
	struct drm_device *dev = intel_dsi->base.base.dev;
@@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
	struct intel_connector *connector = intel_dsi->attached_connector;
	u8 gpio_source, gpio_index = 0, gpio_number;
	bool value;

	drm_dbg_kms(&dev_priv->drm, "\n");
	bool native = DISPLAY_VER(dev_priv) >= 11;

	if (connector->panel.vbt.dsi.seq_version >= 3)
		gpio_index = *data++;
@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
	else
		gpio_source = 0;

	if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
		native = false;

	/* pull up/down */
	value = *data++ & 1;

	if (DISPLAY_VER(dev_priv) >= 11)
	drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
		    gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));

	if (native)
		icl_native_gpio_set_value(dev_priv, gpio_number, value);
	else if (DISPLAY_VER(dev_priv) >= 11)
		icl_exec_gpio(connector, gpio_source, gpio_index, value);
	else if (IS_VALLEYVIEW(dev_priv))
		vlv_exec_gpio(connector, gpio_source, gpio_number, value);
+48 −11
Original line number Diff line number Diff line
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
	bool unpinned;

	/*
	 * Attempt to pin all of the buffers into the GTT.
	 * This is done in 2 phases:
	 * We have one or more buffers that we couldn't bind, which could be due to
	 * various reasons. To resolve this we have 4 passes, with every next
	 * level turning the screws tighter:
	 *
	 * 1. Unbind all objects that do not match the GTT constraints for
	 *    the execbuffer (fenceable, mappable, alignment etc).
	 * 2. Bind new objects.
	 * 0. Unbind all objects that do not match the GTT constraints for the
	 * execbuffer (fenceable, mappable, alignment etc). Bind all new
	 * objects.  This avoids unnecessary unbinding of later objects in order
	 * to make room for the earlier objects *unless* we need to defragment.
	 *
	 * This avoid unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 * 1. Reorder the buffers, where objects with the most restrictive
	 * placement requirements go first (ignoring fixed location buffers for
	 * now).  For example, objects needing the mappable aperture (the first
	 * 256M of GTT), should go first vs objects that can be placed just
	 * about anywhere. Repeat the previous pass.
	 *
	 * Defragmenting is skipped if all objects are pinned at a fixed location.
	 * 2. Consider buffers that are pinned at a fixed location. Also try to
	 * evict the entire VM this time, leaving only objects that we were
	 * unable to lock. Try again to bind the buffers. (still using the new
	 * buffer order).
	 *
	 * 3. We likely have object lock contention for one or more stubborn
	 * objects in the VM, for which we need to evict to make forward
	 * progress (perhaps we are fighting the shrinker?). When evicting the
	 * VM this time around, anything that we can't lock we now track using
	 * the busy_bo, using the full lock (after dropping the vm->mutex to
	 * prevent deadlocks), instead of trylock. We then continue to evict the
	 * VM, this time with the stubborn object locked, which we can now
	 * hopefully unbind (if still bound in the VM). Repeat until the VM is
	 * evicted. Finally we should be able to bind everything.
	 */
	for (pass = 0; pass <= 2; pass++) {
	for (pass = 0; pass <= 3; pass++) {
		int pin_flags = PIN_USER | PIN_VALIDATE;

		if (pass == 0)
			pin_flags |= PIN_NONBLOCK;

		if (pass >= 1)
			unpinned = eb_unbind(eb, pass == 2);
			unpinned = eb_unbind(eb, pass >= 2);

		if (pass == 2) {
			err = mutex_lock_interruptible(&eb->context->vm->mutex);
			if (!err) {
				err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
				mutex_unlock(&eb->context->vm->mutex);
			}
			if (err)
				return err;
		}

		if (pass == 3) {
retry:
			err = mutex_lock_interruptible(&eb->context->vm->mutex);
			if (!err) {
				struct drm_i915_gem_object *busy_bo = NULL;

				err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
				mutex_unlock(&eb->context->vm->mutex);
				if (err && busy_bo) {
					err = i915_gem_object_lock(busy_bo, &eb->ww);
					i915_gem_object_put(busy_bo);
					if (!err)
						goto retry;
				}
			}
			if (err)
				return err;
+1 −1
Original line number Diff line number Diff line
@@ -369,7 +369,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
		if (vma == ERR_PTR(-ENOSPC)) {
			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
			if (!ret) {
				ret = i915_gem_evict_vm(&ggtt->vm, &ww);
				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
				mutex_unlock(&ggtt->vm.mutex);
			}
			if (ret)
+7 −1
Original line number Diff line number Diff line
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
			continue;

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
			u32 val = BIT(engine->instance);

			if (engine->class == VIDEO_DECODE_CLASS ||
			    engine->class == VIDEO_ENHANCEMENT_CLASS ||
			    engine->class == COMPUTE_CLASS)
				val = _MASKED_BIT_ENABLE(val);
			intel_gt_mcr_multicast_write_fw(gt,
							xehp_regs[engine->class],
							BIT(engine->instance));
							val);
		} else {
			rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
			if (!i915_mmio_reg_offset(rb.reg))
+28 −14
Original line number Diff line number Diff line
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
	return 0;
}

/*
 * Request a uC firmware blob and reject it if it does not fit in the
 * GGTT space reserved per firmware.
 *
 * On success *fw holds the loaded blob. An over-size blob is released,
 * *fw is left NULL, and -ENOENT is returned so the caller can fall back
 * to searching for another firmware file.
 */
static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
{
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	int ret;

	ret = firmware_request_nowarn(fw, uc_fw->file_selected.path,
				      gt->i915->drm.dev);
	if (ret)
		return ret;

	if ((*fw)->size <= INTEL_UC_RSVD_GGTT_PER_FW)
		return 0;

	drm_err(&gt->i915->drm,
		"%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
		intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
		(*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);

	/* try to find another blob to load */
	release_firmware(*fw);
	*fw = NULL;
	return -ENOENT;
}

/**
 * intel_uc_fw_fetch - fetch uC firmware
 * @uc_fw: uC firmware
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uc_fw_file file_ideal;
	struct device *dev = i915->drm.dev;
	struct drm_i915_gem_object *obj;
	const struct firmware *fw = NULL;
	bool old_ver = false;
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
	__force_fw_fetch_failures(uc_fw, -EINVAL);
	__force_fw_fetch_failures(uc_fw, -ESTALE);

	err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
	err = try_firmware_load(uc_fw, &fw);
	memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));

	if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
		drm_err(&i915->drm,
			"%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
			intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
			fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);

		/* try to find another blob to load */
		release_firmware(fw);
		err = -ENOENT;
	}

	/* Any error is terminal if overriding. Don't bother searching for older versions */
	if (err && intel_uc_fw_is_overridden(uc_fw))
		goto fail;
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
			break;
		}

		err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
		err = try_firmware_load(uc_fw, &fw);
	}

	if (err)
Loading