Commit abd2f577 authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Flatten obj->mm.lock



With userptr fixed, the separate lockdep classes are no longer needed, and
we can remove all the lockdep tricks we used. A trylock in the shrinker is
now all we need to flatten the locking hierarchy.
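
Illustrative only, not part of this commit, and all names below (demo_object,
demo_get_pages, demo_shrink, mm_lock, has_pages) are made up: a minimal sketch
of the locking shape this flattening relies on. Instead of annotating
obj->mm.lock with lockdep subclasses so it can be re-taken from reclaim, the
shrinker simply trylocks the single lock class and skips the object when the
lock is already held.

#include <linux/mutex.h>

/* Hypothetical stand-in for a GEM object; "mm_lock" plays the role of
 * obj->mm.lock and is a single, flat lock class.
 */
struct demo_object {
	struct mutex mm_lock;
	bool has_pages;
};

/* Allocation path: a plain interruptible lock, no lockdep subclass. */
static int demo_get_pages(struct demo_object *obj)
{
	int err = mutex_lock_interruptible(&obj->mm_lock);

	if (err)
		return err;
	obj->has_pages = true;
	mutex_unlock(&obj->mm_lock);
	return 0;
}

/* Shrinker path: a trylock instead of a nested-lock annotation. If the
 * lock is contended we simply skip the object, so reclaim never waits
 * on (or recurses into) the same lock class.
 */
static unsigned long demo_shrink(struct demo_object *obj)
{
	unsigned long freed = 0;

	if (!mutex_trylock(&obj->mm_lock))
		return 0;
	if (obj->has_pages) {
		obj->has_pages = false;
		freed = 1;
	}
	mutex_unlock(&obj->mm_lock);
	return freed;
}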

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: Resolve conflict because we don't have the patch from Chris
to rebrand i915_gem_shrinker_taints_mutex to fs_reclaim_taints_mutex.
It's not a bad idea, but if we do it, it should be moved to the right
header. See
https://lore.kernel.org/intel-gfx/20210202154318.19246-1-chris@chris-wilson.co.uk/]
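
For context, and not part of this commit (the helper name here is an
assumption, not the driver's): a "taints a mutex against fs_reclaim" helper
typically just takes the lock once inside a simulated reclaim section, so
lockdep records the fs_reclaim -> mutex dependency up front instead of waiting
for a real shrinker invocation. A minimal sketch of that idea:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>

/* Sketch only: prime lockdep with "this mutex may be taken under
 * reclaim" by acquiring it once inside a fake fs_reclaim section.
 */
static void demo_reclaim_taints_mutex(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(mutex);
	mutex_unlock(mutex);
	fs_reclaim_release(GFP_KERNEL);
}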
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-18-maarten.lankhorst@linux.intel.com
parent ed29c269
+1 −5
@@ -62,7 +62,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key, unsigned flags)
{
-	__mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
+	mutex_init(&obj->mm.lock);

	spin_lock_init(&obj->vma.lock);
	INIT_LIST_HEAD(&obj->vma.list);
@@ -86,10 +86,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
	mutex_init(&obj->mm.get_page.lock);
	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_dma_page.lock);
-
-	if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
-		i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
-					       &obj->mm.lock);
}

/**
+2 −18
@@ -346,27 +346,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	/*
-	 * Only used by struct_mutex, when called "recursively" from
-	 * direct-reclaim-esque. Safe because there is only every one
-	 * struct_mutex in the entire system.
-	 */
-	I915_MM_SHRINKER = 1,
-	/*
-	 * Used for obj->mm.lock when allocating pages. Safe because the object
-	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
-	 * it. As soon as the object has pages, obj->mm.lock nests within
-	 * fs_reclaim.
-	 */
-	I915_MM_GET_PAGES = 1,
-};
-
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
-	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;
@@ -410,6 +393,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

+17 −17
@@ -114,7 +114,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

@@ -196,21 +196,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
	return pages;
}

-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
-	int err;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock(&obj->mm.lock);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
-		err = -EBUSY;
-		goto unlock;
-	}
-
	i915_gem_object_release_mmap_offset(obj);

	/*
@@ -226,14 +218,22 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
-	if (!pages)
-		pages = ERR_PTR(-EINVAL);
-
-	if (!IS_ERR(pages))
+	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

-	err = 0;
-unlock:
+	return 0;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return -EBUSY;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock(&obj->mm.lock);
+	err = __i915_gem_object_put_pages_locked(obj);
	mutex_unlock(&obj->mm.lock);

	return err;
@@ -341,7 +341,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
		return ERR_PTR(-ENXIO);

-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return ERR_PTR(err);

+1 −1
@@ -236,7 +236,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
	if (err)
		return err;

-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto err_unlock;

+5 −5
@@ -49,9 +49,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
		flags = I915_GEM_OBJECT_UNBIND_TEST;

	if (i915_gem_object_unbind(obj, flags) == 0)
-		__i915_gem_object_put_pages(obj);
+		return true;

-	return !i915_gem_object_has_pages(obj);
+	return false;
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
@@ -200,10 +200,10 @@ i915_gem_shrink(struct drm_i915_private *i915,

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

-			if (unsafe_drop_pages(obj, shrink)) {
+			if (unsafe_drop_pages(obj, shrink) &&
+			    mutex_trylock(&obj->mm.lock)) {
				/* May arrive from get_pages on another bo */
-				mutex_lock(&obj->mm.lock);
-				if (!i915_gem_object_has_pages(obj)) {
+				if (!__i915_gem_object_put_pages_locked(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}