mm: swap: use swap_entries_free() to free swap entry in swap_entry_put_locked()
authorKemeng Shi <shikemeng@huaweicloud.com>
Tue, 25 Mar 2025 16:25:23 +0000 (00:25 +0800)
committerAndrew Morton <akpm@linux-foundation.org>
Mon, 12 May 2025 00:48:13 +0000 (17:48 -0700)
In swap_entry_put_locked(), we will set slot to SWAP_HAS_CACHE before
using swap_entries_free() to do actual swap entry freeing.  This introduces
an unnecessary intermediate state.  By using swap_entries_free() in
swap_entry_put_locked(), we can eliminate the need to set slot to
SWAP_HAS_CACHE.  This change would make the behavior of
swap_entry_put_locked() more consistent with other put() operations which
will do actual free work after putting the last reference.

Link: https://lkml.kernel.org/r/20250325162528.68385-4-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/swapfile.c

index c6b4c74622fcf6ece2f980a4c6da26f6421e30df..f0ba27db9b3edbf844ce87d3c6a52dc4ef55ef30 100644 (file)
@@ -1356,9 +1356,11 @@ out:
 }
 
 static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
-                                          unsigned long offset,
+                                          struct swap_cluster_info *ci,
+                                          swp_entry_t entry,
                                           unsigned char usage)
 {
+       unsigned long offset = swp_offset(entry);
        unsigned char count;
        unsigned char has_cache;
 
@@ -1390,7 +1392,7 @@ static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
        if (usage)
                WRITE_ONCE(si->swap_map[offset], usage);
        else
-               WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+               swap_entries_free(si, ci, entry, 1);
 
        return usage;
 }
@@ -1469,9 +1471,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
        unsigned char usage;
 
        ci = lock_cluster(si, offset);
-       usage = swap_entry_put_locked(si, offset, 1);
-       if (!usage)
-               swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+       usage = swap_entry_put_locked(si, ci, entry, 1);
        unlock_cluster(ci);
 
        return usage;
@@ -1570,8 +1570,8 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
 
        ci = lock_cluster(si, offset);
        do {
-               if (!swap_entry_put_locked(si, offset, usage))
-                       swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+               swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
+                                     usage);
        } while (++offset < end);
        unlock_cluster(ci);
 }
@@ -1616,10 +1616,8 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
        if (swap_only_has_cache(si, offset, size))
                swap_entries_free(si, ci, entry, size);
        else {
-               for (int i = 0; i < size; i++, entry.val++) {
-                       if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
-                               swap_entries_free(si, ci, entry, 1);
-               }
+               for (int i = 0; i < size; i++, entry.val++)
+                       swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
        }
        unlock_cluster(ci);
 }