drm/msm/shrinker: We can vmap shrink active_list too
Author:     Rob Clark <robdclark@chromium.org>
AuthorDate: Mon, 16 Nov 2020 17:48:50 +0000 (09:48 -0800)
Committer:  Rob Clark <robdclark@chromium.org>
CommitDate: Sat, 21 Nov 2020 17:50:24 +0000 (09:50 -0800)
Just because an obj is active, if the vmap_count is zero, we can still
tear down the vmap.

Signed-off-by: Rob Clark <robdclark@chromium.org>
drivers/gpu/drm/msm/msm_gem_shrinker.c

index 6f4b1355725f4d1df4fc5b2c9916556c1cc3b3d9..9d51c1eb808de762cdd4426504f04a7a83e330de 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
+#include "msm_gpu.h"
 #include "msm_gpu_trace.h"
 
 static unsigned long
@@ -61,17 +62,19 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        return freed;
 }
 
-static int
-msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+/* since we don't know any better, lets bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+static const int vmap_shrink_limit = 15;
+
+static unsigned
+vmap_shrink(struct list_head *mm_list)
 {
-       struct msm_drm_private *priv =
-               container_of(nb, struct msm_drm_private, vmap_notifier);
        struct msm_gem_object *msm_obj;
        unsigned unmapped = 0;
 
-       mutex_lock(&priv->mm_lock);
-
-       list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+       list_for_each_entry(msm_obj, mm_list, mm_list) {
                if (!msm_gem_trylock(&msm_obj->base))
                        continue;
                if (is_vunmapable(msm_obj)) {
@@ -80,11 +83,31 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
                }
                msm_gem_unlock(&msm_obj->base);
 
-               /* since we don't know any better, lets bail after a few
-                * and if necessary the shrinker will be invoked again.
-                * Seems better than unmapping *everything*
-                */
-               if (++unmapped >= 15)
+               if (++unmapped >= vmap_shrink_limit)
+                       break;
+       }
+
+       return unmapped;
+}
+
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+       struct msm_drm_private *priv =
+               container_of(nb, struct msm_drm_private, vmap_notifier);
+       struct list_head *mm_lists[] = {
+               &priv->inactive_list,
+               priv->gpu ? &priv->gpu->active_list : NULL,
+               NULL,
+       };
+       unsigned idx, unmapped = 0;
+
+       mutex_lock(&priv->mm_lock);
+
+       for (idx = 0; mm_lists[idx]; idx++) {
+               unmapped += vmap_shrink(mm_lists[idx]);
+
+               if (unmapped >= vmap_shrink_limit)
                        break;
        }