mm: zswap: function ordering: shrink_memcg_cb
author Johannes Weiner <hannes@cmpxchg.org>
Tue, 30 Jan 2024 01:36:56 +0000 (20:36 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 18:24:45 +0000 (10:24 -0800)
shrink_memcg_cb() is called by the shrinker and is based on
zswap_writeback_entry(). Move it in between the two. This saves one
forward declaration.

Link: https://lkml.kernel.org/r/20240130014208.565554-21-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/zswap.c b/mm/zswap.c
index 667ed3e193406a9e490abb1206a48bd02fb41b94..2bf4bf1d356cfeacabbc48c191ce29e63fafa9b0 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1254,7 +1254,67 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
 * shrinker functions
 **********************************/
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-                                      spinlock_t *lock, void *arg);
+                                      spinlock_t *lock, void *arg)
+{
+       struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+       bool *encountered_page_in_swapcache = (bool *)arg;
+       swp_entry_t swpentry;
+       enum lru_status ret = LRU_REMOVED_RETRY;
+       int writeback_result;
+
+       /*
+        * Rotate the entry to the tail before unlocking the LRU,
+        * so that in case of an invalidation race concurrent
+        * reclaimers don't waste their time on it.
+        *
+        * If writeback succeeds, or failure is due to the entry
+        * being invalidated by the swap subsystem, the invalidation
+        * will unlink and free it.
+        *
+        * Temporary failures, where the same entry should be tried
+        * again immediately, almost never happen for this shrinker.
+        * We don't do any trylocking; -ENOMEM comes closest,
+        * but that's extremely rare and doesn't happen spuriously
+        * either. Don't bother distinguishing this case.
+        *
+        * But since they do exist in theory, the entry cannot just
+        * be unlinked, or we could leak it. Hence, rotate.
+        */
+       list_move_tail(item, &l->list);
+
+       /*
+        * Once the lru lock is dropped, the entry might get freed. The
+        * swpentry is copied to the stack, and entry isn't deref'd again
+        * until the entry is verified to still be alive in the tree.
+        */
+       swpentry = entry->swpentry;
+
+       /*
+        * It's safe to drop the lock here because we return either
+        * LRU_REMOVED_RETRY or LRU_RETRY.
+        */
+       spin_unlock(lock);
+
+       writeback_result = zswap_writeback_entry(entry, swpentry);
+
+       if (writeback_result) {
+               zswap_reject_reclaim_fail++;
+               ret = LRU_RETRY;
+
+               /*
+                * Encountering a page already in swap cache is a sign that we are shrinking
+                * into the warmer region. We should terminate shrinking (if we're in the dynamic
+                * shrinker context).
+                */
+               if (writeback_result == -EEXIST && encountered_page_in_swapcache)
+                       *encountered_page_in_swapcache = true;
+       } else {
+               zswap_written_back_pages++;
+       }
+
+       spin_lock(lock);
+       return ret;
+}
 
 static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
                struct shrink_control *sc)
@@ -1354,69 +1414,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
        pool->shrinker->seeks = DEFAULT_SEEKS;
 }
 
-static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-                                      spinlock_t *lock, void *arg)
-{
-       struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
-       bool *encountered_page_in_swapcache = (bool *)arg;
-       swp_entry_t swpentry;
-       enum lru_status ret = LRU_REMOVED_RETRY;
-       int writeback_result;
-
-       /*
-        * Rotate the entry to the tail before unlocking the LRU,
-        * so that in case of an invalidation race concurrent
-        * reclaimers don't waste their time on it.
-        *
-        * If writeback succeeds, or failure is due to the entry
-        * being invalidated by the swap subsystem, the invalidation
-        * will unlink and free it.
-        *
-        * Temporary failures, where the same entry should be tried
-        * again immediately, almost never happen for this shrinker.
-        * We don't do any trylocking; -ENOMEM comes closest,
-        * but that's extremely rare and doesn't happen spuriously
-        * either. Don't bother distinguishing this case.
-        *
-        * But since they do exist in theory, the entry cannot just
-        * be unlinked, or we could leak it. Hence, rotate.
-        */
-       list_move_tail(item, &l->list);
-
-       /*
-        * Once the lru lock is dropped, the entry might get freed. The
-        * swpentry is copied to the stack, and entry isn't deref'd again
-        * until the entry is verified to still be alive in the tree.
-        */
-       swpentry = entry->swpentry;
-
-       /*
-        * It's safe to drop the lock here because we return either
-        * LRU_REMOVED_RETRY or LRU_RETRY.
-        */
-       spin_unlock(lock);
-
-       writeback_result = zswap_writeback_entry(entry, swpentry);
-
-       if (writeback_result) {
-               zswap_reject_reclaim_fail++;
-               ret = LRU_RETRY;
-
-               /*
-                * Encountering a page already in swap cache is a sign that we are shrinking
-                * into the warmer region. We should terminate shrinking (if we're in the dynamic
-                * shrinker context).
-                */
-               if (writeback_result == -EEXIST && encountered_page_in_swapcache)
-                       *encountered_page_in_swapcache = true;
-       } else {
-               zswap_written_back_pages++;
-       }
-
-       spin_lock(lock);
-       return ret;
-}
-
 static int shrink_memcg(struct mem_cgroup *memcg)
 {
        struct zswap_pool *pool;
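
For reference, below is a minimal sketch of how the dynamic shrinker drives
shrink_memcg_cb() through the list_lru walk API. It is an illustration
modeled on zswap_shrinker_scan() as it looked in this series, not a verbatim
copy: the example_shrinker_scan name is made up, and the enablement and
LRU-protection checks of the real function are elided for brevity.

	/*
	 * Illustrative sketch (not the upstream function): a shrinker
	 * ->scan_objects() hook that walks the per-memcg LRU with
	 * shrink_memcg_cb() as the isolate callback.
	 */
	static unsigned long example_shrinker_scan(struct shrinker *shrinker,
						   struct shrink_control *sc)
	{
		struct zswap_pool *pool = shrinker->private_data;
		bool encountered_page_in_swapcache = false;
		unsigned long shrunk;

		/*
		 * list_lru_shrink_walk() invokes shrink_memcg_cb() for each
		 * entry with the LRU lock held; the callback drops and
		 * retakes that lock around writeback, which is why it may
		 * only return LRU_REMOVED_RETRY or LRU_RETRY.
		 */
		shrunk = list_lru_shrink_walk(&pool->list_lru, sc,
					      &shrink_memcg_cb,
					      &encountered_page_in_swapcache);

		/*
		 * The cb_arg doubles as a stop signal: -EEXIST from
		 * writeback means an entry was already in the swap cache,
		 * i.e. the walk has reached the warmer region, so stop.
		 */
		if (encountered_page_in_swapcache)
			return SHRINK_STOP;

		return shrunk ? shrunk : SHRINK_STOP;
	}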