Commit e66c8dcf authored by Anshuman Gupta's avatar Anshuman Gupta
Browse files

drm/i915: Encapsulate lmem rpm stuff in intel_runtime_pm



Runtime pm is not really per GT, therefore it makes sense to
move lmem_userfault_list, lmem_userfault_lock and
userfault_wakeref from intel_gt to the intel_runtime_pm structure,
which is embedded in i915.

No functional change.

v2:
- Fixes the code comment nit. [Matt Auld]

Signed-off-by: default avatarAnshuman Gupta <anshuman.gupta@intel.com>
Reviewed-by: default avatarMatthew Auld <matthew.auld@intel.com>
Acked-by: default avatarRodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221027092242.1476080-2-anshuman.gupta@intel.com
parent 876e9047
Loading
Loading
Loading
Loading
+3 −3
Original line number Diff line number Diff line
@@ -413,7 +413,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
	vma->mmo = mmo;

	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	if (write) {
@@ -589,9 +589,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
	spin_unlock(&obj->mmo.lock);

	if (obj->userfault_count) {
		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
		list_del(&obj->userfault_link);
		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
		obj->userfault_count = 0;
	}
}
+1 −1
Original line number Diff line number Diff line
@@ -27,7 +27,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)

	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
	intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref, 0);
	flush_workqueue(i915->wq);

	/*
+4 −4
Original line number Diff line number Diff line
@@ -1101,13 +1101,13 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
	/* ttm_bo_vm_reserve() already has dma_resv_lock */
	if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
		obj->userfault_count = 1;
		mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
		mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
		mutex_lock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
		list_add(&obj->userfault_link, &to_i915(obj->base.dev)->runtime_pm.lmem_userfault_list);
		mutex_unlock(&to_i915(obj->base.dev)->runtime_pm.lmem_userfault_lock);
	}

	if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
		intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
		intel_wakeref_auto(&to_i915(obj->base.dev)->runtime_pm.userfault_wakeref,
				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));

	i915_ttm_adjust_lru(obj);
+0 −3
Original line number Diff line number Diff line
@@ -40,8 +40,6 @@ void intel_gt_common_init_early(struct intel_gt *gt)
{
	spin_lock_init(gt->irq_lock);

	INIT_LIST_HEAD(&gt->lmem_userfault_list);
	mutex_init(&gt->lmem_userfault_lock);
	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

@@ -859,7 +857,6 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
	}

	intel_uncore_init_early(gt->uncore, gt);
	intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);

	ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
	if (ret)
+0 −17
Original line number Diff line number Diff line
@@ -145,20 +145,6 @@ struct intel_gt {
	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	/**
	 *  Protects access to lmem usefault list.
	 *  It is required, if we are outside of the runtime suspend path,
	 *  access to @lmem_userfault_list requires always first grabbing the
	 *  runtime pm, to ensure we can't race against runtime suspend.
	 *  Once we have that we also need to grab @lmem_userfault_lock,
	 *  at which point we have exclusive access.
	 *  The runtime suspend path is special since it doesn't really hold any locks,
	 *  but instead has exclusive access by virtue of all other accesses requiring
	 *  holding the runtime pm wakeref.
	 */
	struct mutex lmem_userfault_lock;
	struct list_head lmem_userfault_list;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

@@ -174,9 +160,6 @@ struct intel_gt {
	 */
	intel_wakeref_t awake;

	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	u32 clock_frequency;
	u32 clock_period_ns;

Loading