Commit fcd371c2 authored by Rob Clark
Browse files

drm/msm/shrinker: We can vmap shrink active_list too



Just because an obj is active, if the vmap_count is zero, we can still
tear down the vmap.

Signed-off-by: Rob Clark <robdclark@chromium.org>
parent ab5c54cb
Loading
Loading
Loading
Loading
+35 −12
Original line number Diff line number Diff line
@@ -6,6 +6,7 @@

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

static unsigned long
@@ -61,17 +62,19 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	return freed;
}

/* NOTE(review): the two lines below are the *removed* signature of the old
 * msm_gem_shrinker_vmap() — this page is a diff rendered without +/- markers,
 * so old and new lines are interleaved.  The new signature appears further
 * down in this file.
 */
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
/* since we don't know any better, lets bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*
 */
/* Per-invocation cap on how many objects vmap_shrink() will vunmap; the
 * comment above was hoisted here from the loop body by this commit.
 */
static const int vmap_shrink_limit = 15;

/* Walk one object list (mm_list) and tear down the kernel vmap of every
 * object that is currently vunmapable, stopping after vmap_shrink_limit
 * objects.  Returns the count of loop iterations performed (used by the
 * caller to decide whether to keep scanning further lists).
 *
 * NOTE(review): this span is a scraped diff with the +/- markers lost.
 * Several of the lines below belong to the *old* (removed) function body,
 * not to vmap_shrink(); they are marked inline.
 */
static unsigned
vmap_shrink(struct list_head *mm_list)
{
	/* NOTE(review): removed lines from the old notifier callback — `nb`
	 * is not a parameter of vmap_shrink(), so these cannot be part of
	 * the new body.
	 */
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;

	/* NOTE(review): removed line — locking moved to the new caller,
	 * msm_gem_shrinker_vmap(), which takes priv->mm_lock around all
	 * list walks.
	 */
	mutex_lock(&priv->mm_lock);

	/* NOTE(review): first line is the removed (inactive_list-only)
	 * iteration; second line is its replacement, iterating whatever
	 * list the caller passed in.
	 */
	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
	list_for_each_entry(msm_obj, mm_list, mm_list) {
		/* Skip objects whose lock is contended rather than blocking. */
		if (!msm_gem_trylock(&msm_obj->base))
			continue;
		if (is_vunmapable(msm_obj)) {
/* NOTE(review): hunk header — the body of the is_vunmapable branch is hidden
 * between hunks; presumably it vunmaps the object — verify against the tree.
 */
@@ -80,11 +83,31 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
		}
		msm_gem_unlock(&msm_obj->base);

		/* NOTE(review): removed comment block and removed `>= 15`
		 * check; the constant now lives in vmap_shrink_limit.
		 */
		/* since we don't know any better, lets bail after a few
		 * and if necessary the shrinker will be invoked again.
		 * Seems better than unmapping *everything*
		 */
		if (++unmapped >= 15)
		if (++unmapped >= vmap_shrink_limit)
			break;
	}

	return unmapped;
}

static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct list_head *mm_lists[] = {
		&priv->inactive_list,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};
	unsigned idx, unmapped = 0;

	mutex_lock(&priv->mm_lock);

	for (idx = 0; mm_lists[idx]; idx++) {
		unmapped += vmap_shrink(mm_lists[idx]);

		if (unmapped >= vmap_shrink_limit)
			break;
	}