Commit 599089c6 authored by Rob Clark

drm/msm/gem: Move locking in shrinker path



Move grabbing the bo lock into shrinker, with a msm_gem_trylock() to
skip over bo's that are already locked.  This gets rid of the nested
lock classes.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent e4b87d22
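
In outline: previously msm_gem_purge() and msm_gem_vunmap() grabbed the per-object lock themselves via mutex_lock_nested() with an OBJ_LOCK_SHRINKER subclass; after this change the shrinker takes the lock up front with msm_gem_trylock() and simply skips any object whose lock is already held. A rough sketch of the resulting shrinker-side pattern, condensed from the msm_gem_shrinker_scan() hunk below (function setup and the struct_mutex handling omitted):

	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= sc->nr_to_scan)
			break;
		/* skip objects already locked elsewhere instead of nesting locks */
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_purgeable(msm_obj)) {
			/* called with the object lock already held */
			msm_gem_purge(&msm_obj->base);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
		msm_gem_unlock(&msm_obj->base);
	}

Because the shrinker only ever trylocks, lockdep never sees an object lock acquired while another object lock is held, which is why the OBJ_LOCK_NORMAL/OBJ_LOCK_SHRINKER subclasses removed in msm_gem.h below are no longer needed.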
drivers/gpu/drm/msm/msm_gem.c  +5 −19
@@ -18,8 +18,6 @@
 #include "msm_gpu.h"
 #include "msm_mmu.h"
 
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
-
 
 static dma_addr_t physaddr(struct drm_gem_object *obj)
 {
@@ -692,20 +690,19 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 	return (madv != __MSM_MADV_PURGED);
 }
 
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
+void msm_gem_purge(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+	WARN_ON(!msm_gem_is_locked(obj));
 	WARN_ON(!is_purgeable(msm_obj));
 	WARN_ON(obj->import_attach);
 
-	mutex_lock_nested(&msm_obj->lock, subclass);
-
 	put_iova(obj);
 
-	msm_gem_vunmap_locked(obj);
+	msm_gem_vunmap(obj);
 
 	put_pages(obj);
 
@@ -723,11 +720,9 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);
-
-	msm_gem_unlock(obj);
 }
 
-static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
+void msm_gem_vunmap(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
@@ -740,15 +735,6 @@ static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 	msm_obj->vaddr = NULL;
 }
 
-void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
-{
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
-	mutex_lock_nested(&msm_obj->lock, subclass);
-	msm_gem_vunmap_locked(obj);
-	msm_gem_unlock(obj);
-}
-
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -985,7 +971,7 @@ static void free_object(struct msm_gem_object *msm_obj)
 
 		drm_prime_gem_destroy(obj, msm_obj->sgt);
 	} else {
-		msm_gem_vunmap_locked(obj);
+		msm_gem_vunmap(obj);
 		put_pages(obj);
 	}
 
drivers/gpu/drm/msm/msm_gem.h  +11 −18
@@ -162,6 +162,13 @@ msm_gem_lock(struct drm_gem_object *obj)
 	mutex_lock(&msm_obj->lock);
 }
 
+static inline bool __must_check
+msm_gem_trylock(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return mutex_trylock(&msm_obj->lock) == 1;
+}
+
 static inline int
 msm_gem_lock_interruptible(struct drm_gem_object *obj)
 {
@@ -190,6 +197,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 {
+	WARN_ON(!msm_gem_is_locked(&msm_obj->base));
 	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
 	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
 			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
@@ -197,27 +205,12 @@ static inline bool is_purgeable(struct msm_gem_object *msm_obj)
 
 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
 {
+	WARN_ON(!msm_gem_is_locked(&msm_obj->base));
 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
 }
 
-/* The shrinker can be triggered while we hold objA->lock, and need
- * to grab objB->lock to purge it.  Lockdep just sees these as a single
- * class of lock, so we use subclasses to teach it the difference.
- *
- * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
- * OBJ_LOCK_SHRINKER is used by shrinker.
- *
- * It is *essential* that we never go down paths that could trigger the
- * shrinker for a purgable object.  This is ensured by checking that
- * msm_obj->madv == MSM_MADV_WILLNEED.
- */
-enum msm_gem_lock {
-	OBJ_LOCK_NORMAL,
-	OBJ_LOCK_SHRINKER,
-};
-
-void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
-void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
 void msm_gem_free_work(struct work_struct *work);
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
drivers/gpu/drm/msm/msm_gem_shrinker.c  +19 −8
@@ -52,8 +52,11 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 		return 0;
 
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+		if (!msm_gem_trylock(&msm_obj->base))
+			continue;
 		if (is_purgeable(msm_obj))
 			count += msm_obj->base.size >> PAGE_SHIFT;
+		msm_gem_unlock(&msm_obj->base);
 	}
 
 	if (unlock)
@@ -78,10 +81,13 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
 		if (freed >= sc->nr_to_scan)
 			break;
+		if (!msm_gem_trylock(&msm_obj->base))
+			continue;
 		if (is_purgeable(msm_obj)) {
-			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
+			msm_gem_purge(&msm_obj->base);
 			freed += msm_obj->base.size >> PAGE_SHIFT;
 		}
+		msm_gem_unlock(&msm_obj->base);
 	}
 
 	if (unlock)
@@ -107,8 +113,14 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		return NOTIFY_DONE;
 
 	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+		if (!msm_gem_trylock(&msm_obj->base))
+			continue;
 		if (is_vunmapable(msm_obj)) {
-			msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
+			msm_gem_vunmap(&msm_obj->base);
+			unmapped++;
+		}
+		msm_gem_unlock(&msm_obj->base);
+
 		/* since we don't know any better, lets bail after a few
 		 * and if necessary the shrinker will be invoked again.
 		 * Seems better than unmapping *everything*
@@ -116,7 +128,6 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		if (++unmapped >= 15)
 			break;
 	}
-	}
 
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);