mm: refactor folio_undo_large_rmappable()
author     Kefeng Wang <wangkefeng.wang@huawei.com>    Tue, 21 May 2024 13:03:15 +0000 (21:03 +0800)
committer  Andrew Morton <akpm@linux-foundation.org>   Fri, 5 Jul 2024 01:05:50 +0000 (18:05 -0700)
Folios of order <= 1 are never on a deferred split list; the order check
was added to folio_undo_large_rmappable() by commit 8897277acfef ("mm:
support order-1 folios in the page cache").  However, callers still
repeat a check for small (order-0) folios before each call to
folio_undo_large_rmappable(), which the folio_order() check inside the
function already covers, so drop the duplicated checks at the call sites
and keep only the folio_order() check inside the function.
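
For illustration, a call site before and after this change (condensed
from the mm/swap.c hunk below):

	/* before: every caller open-coded the large/rmappable checks */
	if (folio_test_large(folio) && folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	/* after: the checks live in folio_undo_large_rmappable() itself */
	folio_undo_large_rmappable(folio);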

In addition, move all of the checks into the header file as an inline
wrapper, which saves a function call for folios that are not
large-rmappable or whose deferred_list is empty.
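
The resulting split, condensed from the mm/internal.h hunk below (the
out-of-line __folio_undo_large_rmappable() in mm/huge_memory.c keeps
only the lock-protected deferred-list removal):

	void __folio_undo_large_rmappable(struct folio *folio);

	static inline void folio_undo_large_rmappable(struct folio *folio)
	{
		if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
			return;

		/* Safe to test without split_queue_lock; see comment in the hunk. */
		if (data_race(list_empty(&folio->_deferred_list)))
			return;

		__folio_undo_large_rmappable(folio);
	}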

Link: https://lkml.kernel.org/r/20240521130315.46072-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c
mm/internal.h
mm/page_alloc.c
mm/swap.c
mm/vmscan.c

index 1c827fd618cd0b1a04d580675c7411f21822b0ff..c7ce28f6b7f3b28fffbf3fae79d7422e81e14b57 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3258,22 +3258,11 @@ out:
        return ret;
 }
 
-void folio_undo_large_rmappable(struct folio *folio)
+void __folio_undo_large_rmappable(struct folio *folio)
 {
        struct deferred_split *ds_queue;
        unsigned long flags;
 
-       if (folio_order(folio) <= 1)
-               return;
-
-       /*
-        * At this point, there is no one trying to add the folio to
-        * deferred_list. If folio is not in deferred_list, it's safe
-        * to check without acquiring the split_queue_lock.
-        */
-       if (data_race(list_empty(&folio->_deferred_list)))
-               return;
-
        ds_queue = get_deferred_split_queue(folio);
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (!list_empty(&folio->_deferred_list)) {
index 0af4c988542422327f8cd370c94680395b14c07e..2ea9a88dcb953fd6e851e4039d12013424d48e3a 100644 (file)
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -622,7 +622,22 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
-void folio_undo_large_rmappable(struct folio *folio);
+void __folio_undo_large_rmappable(struct folio *folio);
+static inline void folio_undo_large_rmappable(struct folio *folio)
+{
+       if (folio_order(folio) <= 1 || !folio_test_large_rmappable(folio))
+               return;
+
+       /*
+        * At this point, there is no one trying to add the folio to
+        * deferred_list. If folio is not in deferred_list, it's safe
+        * to check without acquiring the split_queue_lock.
+        */
+       if (data_race(list_empty(&folio->_deferred_list)))
+               return;
+
+       __folio_undo_large_rmappable(folio);
+}
 
 static inline struct folio *page_rmappable_folio(struct page *page)
 {
index 8a0fd41376846539040b147a630ac62beced640d..3f3d83def9be9f3341d92c7814df307d753fbaf0 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2661,8 +2661,7 @@ void free_unref_folios(struct folio_batch *folios)
                unsigned long pfn = folio_pfn(folio);
                unsigned int order = folio_order(folio);
 
-               if (order > 0 && folio_test_large_rmappable(folio))
-                       folio_undo_large_rmappable(folio);
+               folio_undo_large_rmappable(folio);
                if (!free_pages_prepare(&folio->page, order))
                        continue;
                /*
index 67786cb771305c09e6fd96c03296261e19f0f990..dc205bdfbbd48538af43981bbb3936a2cba02dfc 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -123,8 +123,7 @@ void __folio_put(struct folio *folio)
        }
 
        page_cache_release(folio);
-       if (folio_test_large(folio) && folio_test_large_rmappable(folio))
-               folio_undo_large_rmappable(folio);
+       folio_undo_large_rmappable(folio);
        mem_cgroup_uncharge(folio);
        free_unref_page(&folio->page, folio_order(folio));
 }
@@ -1002,10 +1001,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
                        free_huge_folio(folio);
                        continue;
                }
-               if (folio_test_large(folio) &&
-                   folio_test_large_rmappable(folio))
-                       folio_undo_large_rmappable(folio);
-
+               folio_undo_large_rmappable(folio);
                __page_cache_release(folio, &lruvec, &flags);
 
                if (j != i)
index 633632cb9344beaf2074cdad662c1883593e247d..1807e5d95dda2f134c203a1d424cd1d2aaf2d19d 100644 (file)
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1439,9 +1439,7 @@ free_it:
                 */
                nr_reclaimed += nr_pages;
 
-               if (folio_test_large(folio) &&
-                   folio_test_large_rmappable(folio))
-                       folio_undo_large_rmappable(folio);
+               folio_undo_large_rmappable(folio);
                if (folio_batch_add(&free_folios, folio) == 0) {
                        mem_cgroup_uncharge_folios(&free_folios);
                        try_to_unmap_flush();
@@ -1848,9 +1846,7 @@ static unsigned int move_folios_to_lru(struct lruvec *lruvec,
                if (unlikely(folio_put_testzero(folio))) {
                        __folio_clear_lru_flags(folio);
 
-                       if (folio_test_large(folio) &&
-                           folio_test_large_rmappable(folio))
-                               folio_undo_large_rmappable(folio);
+                       folio_undo_large_rmappable(folio);
                        if (folio_batch_add(&free_folios, folio) == 0) {
                                spin_unlock_irq(&lruvec->lru_lock);
                                mem_cgroup_uncharge_folios(&free_folios);