Commit f569ae75 authored by Tvrtko Ursulin's avatar Tvrtko Ursulin Committed by Matt Roper
Browse files

drm/i915: Handle all GTs on driver (un)load paths



This, along with the changes already landed in commit 1c66a12a
("drm/i915: Handle each GT on init/release and suspend/resume") makes
engines from all GTs actually known to the driver.

To accomplish this we need to sprinkle a lot of for_each_gt calls around,
but it is otherwise pretty uneventful.

v2:
 - Consolidate adjacent GT loops in a couple places.  (Daniele)

Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220915232654.3283095-5-matthew.d.roper@intel.com
parent 4b3823ff
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -1647,7 +1647,8 @@ static int intel_runtime_suspend(struct device *kdev)

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(to_gt(dev_priv));
		for_each_gt(gt, dev_priv, i)
			intel_gt_runtime_resume(gt);

		enable_rpm_wakeref_asserts(rpm);

+31 −12
Original line number Diff line number Diff line
@@ -1091,6 +1091,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
@@ -1122,9 +1124,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_gt_init(to_gt(dev_priv));
	for_each_gt(gt, dev_priv, i) {
		ret = intel_gt_init(gt);
		if (ret)
			goto err_unlock;
	}

	return 0;

@@ -1137,8 +1141,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_unlock:
	i915_gem_drain_workqueue(dev_priv);

	if (ret != -EIO)
		intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
	if (ret != -EIO) {
		for_each_gt(gt, dev_priv, i) {
			intel_gt_driver_remove(gt);
			intel_gt_driver_release(gt);
			intel_uc_cleanup_firmwares(&gt->uc);
		}
	}

	if (ret == -EIO) {
		/*
@@ -1146,10 +1155,12 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
		 * as wedged. But we only want to do this when the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!intel_gt_is_wedged(to_gt(dev_priv))) {
		for_each_gt(gt, dev_priv, i) {
			if (!intel_gt_is_wedged(gt)) {
				i915_probe_error(dev_priv,
						 "Failed to initialize GPU, declaring it wedged!\n");
			intel_gt_set_wedged(to_gt(dev_priv));
				intel_gt_set_wedged(gt);
			}
		}

		/* Minimal basic recovery for KMS */
@@ -1177,10 +1188,14 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915)

void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt;
	unsigned int i;

	intel_wakeref_auto_fini(&to_gt(dev_priv)->userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_gt_driver_remove(to_gt(dev_priv));
	for_each_gt(gt, dev_priv, i)
		intel_gt_driver_remove(gt);
	dev_priv->uabi_engines = RB_ROOT;

	/* Flush any outstanding unpin_work. */
@@ -1191,9 +1206,13 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)

void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
	intel_gt_driver_release(to_gt(dev_priv));
	struct intel_gt *gt;
	unsigned int i;

	intel_uc_cleanup_firmwares(&to_gt(dev_priv)->uc);
	for_each_gt(gt, dev_priv, i) {
		intel_gt_driver_release(gt);
		intel_uc_cleanup_firmwares(&gt->uc);
	}

	i915_gem_drain_freed_objects(dev_priv);