mm: add per-order mTHP anon_swpout and anon_swpout_fallback counters
author	Barry Song <v-songbaohua@oppo.com>
	Fri, 12 Apr 2024 11:48:56 +0000 (23:48 +1200)
committer	Andrew Morton <akpm@linux-foundation.org>
	Mon, 6 May 2024 00:53:35 +0000 (17:53 -0700)
This helps to display the fragmentation situation of the swapfile: it tells us what proportion of large folios we managed to swap out without splitting them.  So far, we only support non-split swapout for anon memory, with the possibility of expanding to shmem in the future, so we add the "anon" prefix to the counter names.
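
For illustration only (not part of this patch): a minimal userspace sketch, assuming the per-order sysfs stats interface added earlier in this series, that reads the two new counters for one example folio size and derives the split-fallback ratio.  The hugepages-64kB directory is an assumption; the sizes actually present depend on the architecture and base page size.

	#include <stdio.h>

	/* Read a single decimal counter from a sysfs file; -1 on failure. */
	static long read_stat(const char *path)
	{
		long val = -1;
		FILE *f = fopen(path, "r");

		if (f) {
			if (fscanf(f, "%ld", &val) != 1)
				val = -1;
			fclose(f);
		}
		return val;
	}

	int main(void)
	{
		/* Example order; adjust to a size your system exposes. */
		const char *dir = "/sys/kernel/mm/transparent_hugepage/hugepages-64kB/stats";
		char path[256];
		long out, fb;

		snprintf(path, sizeof(path), "%s/anon_swpout", dir);
		out = read_stat(path);
		snprintf(path, sizeof(path), "%s/anon_swpout_fallback", dir);
		fb = read_stat(path);

		printf("anon_swpout: %ld  anon_swpout_fallback: %ld\n", out, fb);
		if (out >= 0 && fb >= 0 && out + fb > 0)
			printf("split fallback ratio: %.2f%%\n",
			       100.0 * fb / (out + fb));
		return 0;
	}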

Link: https://lkml.kernel.org/r/20240412114858.407208-3-21cnbao@gmail.com
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/huge_mm.h
mm/huge_memory.c
mm/page_io.c
mm/vmscan.c

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index d4fdb264107021d26fa2953ecbf6b3ff2490f3ae..7cd07b83a3d0aaacb9a7195d9004b351c7133262 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -268,6 +268,8 @@ enum mthp_stat_item {
        MTHP_STAT_ANON_FAULT_ALLOC,
        MTHP_STAT_ANON_FAULT_FALLBACK,
        MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
+       MTHP_STAT_ANON_SWPOUT,
+       MTHP_STAT_ANON_SWPOUT_FALLBACK,
        __MTHP_STAT_COUNT
 };
 
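For context, the new enum entries are bumped through count_mthp_stat(), which earlier patches in this series added to this same header.  A rough sketch of that helper, reproduced here only for readability (array sizing and the bounds check may differ in detail from the tree):

	/* Per-order, per-CPU mTHP counters (sketch of the earlier patch). */
	struct mthp_stat {
		unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
	};

	DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

	static inline void count_mthp_stat(int order, enum mthp_stat_item item)
	{
		if (order <= 0 || order > PMD_ORDER)
			return;

		this_cpu_inc(mthp_stats.stats[order][item]);
	}
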
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 01f01d6a50a1dd10857a2ce56e5f081c4da376f2..264e09043f09695a0a8aef043ba2aa2721c99f38 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -555,11 +555,15 @@ static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
 DEFINE_MTHP_STAT_ATTR(anon_fault_alloc, MTHP_STAT_ANON_FAULT_ALLOC);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE);
+DEFINE_MTHP_STAT_ATTR(anon_swpout, MTHP_STAT_ANON_SWPOUT);
+DEFINE_MTHP_STAT_ATTR(anon_swpout_fallback, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 
 static struct attribute *stats_attrs[] = {
        &anon_fault_alloc_attr.attr,
        &anon_fault_fallback_attr.attr,
        &anon_fault_fallback_charge_attr.attr,
+       &anon_swpout_attr.attr,
+       &anon_swpout_fallback_attr.attr,
        NULL,
 };
 
diff --git a/mm/page_io.c b/mm/page_io.c
index a9a7c236aeccfba81ce34ec7638f67d8ce1cd7c8..46c603dddf043d4b5c0380996d3a5fb842d73315 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -217,6 +217,7 @@ static inline void count_swpout_vm_event(struct folio *folio)
                count_memcg_folio_events(folio, THP_SWPOUT, 1);
                count_vm_event(THP_SWPOUT);
        }
+       count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_SWPOUT);
 #endif
        count_vm_events(PSWPOUT, folio_nr_pages(folio));
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index da93213af981e99dc58aa8fcc2b2d91e8d1db2fd..d194e7240df44ab65063a63a300ad0703bdd477b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1214,6 +1214,8 @@ retry:
                                                goto activate_locked;
                                }
                                if (!add_to_swap(folio)) {
+                                       int __maybe_unused order = folio_order(folio);
+
                                        if (!folio_test_large(folio))
                                                goto activate_locked_split;
                                        /* Fallback to swap normal pages */
@@ -1225,6 +1227,7 @@ retry:
                                                        THP_SWPOUT_FALLBACK, 1);
                                                count_vm_event(THP_SWPOUT_FALLBACK);
                                        }
+                                       count_mthp_stat(order, MTHP_STAT_ANON_SWPOUT_FALLBACK);
 #endif
                                        if (!add_to_swap(folio))
                                                goto activate_locked_split;