mm/swap: stop using page->private on tail pages for THP_SWAP
author: David Hildenbrand <david@redhat.com>
Mon, 21 Aug 2023 16:08:46 +0000 (18:08 +0200)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 24 Aug 2023 23:20:28 +0000 (16:20 -0700)
Patch series "mm/swap: stop using page->private on tail pages for THP_SWAP
+ cleanups".

This series stops using page->private on tail pages for THP_SWAP, replaces
folio->private by folio->swap for swapcache folios, and starts using
"new_folio" for tail pages that we are splitting to remove the usage of
page->private for swapcache handling completely.

This patch (of 4):

Let's stop using page->private on tail pages, making it possible to just
unconditionally reuse that field in the tail pages of large folios.

The remaining usage of the private field for THP_SWAP is in the THP
splitting code (mm/huge_memory.c), that we'll handle separately later.

Update the THP_SWAP documentation and sanity checks in mm_types.h and
__split_huge_page_tail().

[david@redhat.com: stop using page->private on tail pages for THP_SWAP]
Link: https://lkml.kernel.org/r/6f0a82a3-6948-20d9-580b-be1dbf415701@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-1-david@redhat.com
Link: https://lkml.kernel.org/r/20230821160849.531668-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Seth Jennings <sjenning@redhat.com>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/mm/mteswap.c
include/linux/mm_types.h
include/linux/swap.h
mm/huge_memory.c
mm/memory.c
mm/rmap.c
mm/swap_state.c
mm/swapfile.c

index cd508ba80ab1ba889fdc29647297c6707418e531..a31833e3ddc544c88abe1ef4a8ef6d292cd2bd8d 100644 (file)
@@ -33,8 +33,9 @@ int mte_save_tags(struct page *page)
 
        mte_save_page_tags(page_address(page), tag_storage);
 
-       /* page_private contains the swap entry.val set in do_swap_page */
-       ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
+       /* lookup the swap entry.val from the page */
+       ret = xa_store(&mte_pages, page_swap_entry(page).val, tag_storage,
+                      GFP_KERNEL);
        if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
                mte_free_tag_storage(tag_storage);
                return xa_err(ret);
index 2b9d8be28361e62ef2c52c4466704ba6d475e93e..55cd4bc57b8df3d4cacca27b384e7809db95e847 100644 (file)
@@ -322,11 +322,8 @@ struct folio {
                        atomic_t _pincount;
 #ifdef CONFIG_64BIT
                        unsigned int _folio_nr_pages;
-                       /* 4 byte gap here */
-       /* private: the union with struct page is transitional */
-                       /* Fix THP_SWAP to not use tail->private */
-                       unsigned long _private_1;
 #endif
+       /* private: the union with struct page is transitional */
                };
                struct page __page_1;
        };
@@ -347,9 +344,6 @@ struct folio {
        /* public: */
                        struct list_head _deferred_list;
        /* private: the union with struct page is transitional */
-                       unsigned long _avail_2a;
-                       /* Fix THP_SWAP to not use tail->private */
-                       unsigned long _private_2a;
                };
                struct page __page_2;
        };
@@ -374,9 +368,6 @@ FOLIO_MATCH(memcg_data, memcg_data);
                        offsetof(struct page, pg) + sizeof(struct page))
 FOLIO_MATCH(flags, _flags_1);
 FOLIO_MATCH(compound_head, _head_1);
-#ifdef CONFIG_64BIT
-FOLIO_MATCH(private, _private_1);
-#endif
 #undef FOLIO_MATCH
 #define FOLIO_MATCH(pg, fl)                                            \
        static_assert(offsetof(struct folio, fl) ==                     \
@@ -385,7 +376,6 @@ FOLIO_MATCH(flags, _flags_2);
 FOLIO_MATCH(compound_head, _head_2);
 FOLIO_MATCH(flags, _flags_2a);
 FOLIO_MATCH(compound_head, _head_2a);
-FOLIO_MATCH(private, _private_2a);
 #undef FOLIO_MATCH
 
 /**
index bb5adc6041448a15f834b3e4fd8919e3731ef357..e5cf58a1cf9e629d1641dc139b5d1623bc6c2076 100644 (file)
@@ -339,6 +339,15 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio)
        return entry;
 }
 
+static inline swp_entry_t page_swap_entry(struct page *page)
+{
+       struct folio *folio = page_folio(page);
+       swp_entry_t entry = folio_swap_entry(folio);
+
+       entry.val += folio_page_idx(folio, page);
+       return entry;
+}
+
 static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
 {
        folio->private = (void *)entry.val;
index cb4432792b88b460cef8b476720695c7c1837a99..a28e9fe16585482079eda11a31cf6ca3f12e81eb 100644 (file)
@@ -2446,18 +2446,15 @@ static void __split_huge_page_tail(struct page *head, int tail,
        page_tail->index = head->index + tail;
 
        /*
-        * page->private should not be set in tail pages with the exception
-        * of swap cache pages that store the swp_entry_t in tail pages.
-        * Fix up and warn once if private is unexpectedly set.
-        *
-        * What of 32-bit systems, on which folio->_pincount overlays
-        * head[1].private?  No problem: THP_SWAP is not enabled on 32-bit, and
-        * pincount must be 0 for folio_ref_freeze() to have succeeded.
+        * page->private should not be set in tail pages. Fix up and warn once
+        * if private is unexpectedly set.
         */
-       if (!folio_test_swapcache(page_folio(head))) {
-               VM_WARN_ON_ONCE_PAGE(page_tail->private != 0, page_tail);
+       if (unlikely(page_tail->private)) {
+               VM_WARN_ON_ONCE_PAGE(true, page_tail);
                page_tail->private = 0;
        }
+       if (PageSwapCache(head))
+               set_page_private(page_tail, (unsigned long)head->private + tail);
 
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
index 9d7fb721a680a0341dc0965c223b2a58dbf53693..d104a38e8545a7fefd19baddd1aa6096a0556eaa 100644 (file)
@@ -3879,7 +3879,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                 * changed.
                 */
                if (unlikely(!folio_test_swapcache(folio) ||
-                            page_private(page) != entry.val))
+                            page_swap_entry(page).val != entry.val))
                        goto out_page;
 
                /*
index 1f04debdc87a035ed46c0020bb6ce27544e7e4f7..ec7f8e6c9e483a6ff768272d4e89221044974bee 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1647,7 +1647,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                         */
                        dec_mm_counter(mm, mm_counter(&folio->page));
                } else if (folio_test_anon(folio)) {
-                       swp_entry_t entry = { .val = page_private(subpage) };
+                       swp_entry_t entry = page_swap_entry(subpage);
                        pte_t swp_pte;
                        /*
                         * Store the swap location in the pte.
index 01f15139b7d9e52b4b4a882b130bceffc7d4be98..2f24178100520b080d781526aa660867a4e027eb 100644 (file)
@@ -100,6 +100,7 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
 
        folio_ref_add(folio, nr);
        folio_set_swapcache(folio);
+       folio_set_swap_entry(folio, entry);
 
        do {
                xas_lock_irq(&xas);
@@ -113,7 +114,6 @@ int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
                                if (shadowp)
                                        *shadowp = old;
                        }
-                       set_page_private(folio_page(folio, i), entry.val + i);
                        xas_store(&xas, folio);
                        xas_next(&xas);
                }
@@ -154,9 +154,10 @@ void __delete_from_swap_cache(struct folio *folio,
        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != folio, entry);
-               set_page_private(folio_page(folio, i), 0);
                xas_next(&xas);
        }
+       entry.val = 0;
+       folio_set_swap_entry(folio, entry);
        folio_clear_swapcache(folio);
        address_space->nrpages -= nr;
        __node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
index d46933adf789f5085f6b588b993596cc2dcd279a..bd9d904671b9476321f08ceb3be4ebc6be030c66 100644 (file)
@@ -3369,7 +3369,7 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
 
 struct swap_info_struct *page_swap_info(struct page *page)
 {
-       swp_entry_t entry = { .val = page_private(page) };
+       swp_entry_t entry = page_swap_entry(page);
        return swp_swap_info(entry);
 }
 
@@ -3384,7 +3384,7 @@ EXPORT_SYMBOL_GPL(swapcache_mapping);
 
 pgoff_t __page_file_index(struct page *page)
 {
-       swp_entry_t swap = { .val = page_private(page) };
+       swp_entry_t swap = page_swap_entry(page);
        return swp_offset(swap);
 }
 EXPORT_SYMBOL_GPL(__page_file_index);