mm: huge_memory: add the missing folio_test_pmd_mappable() for THP split statistics
author: Baolin Wang <baolin.wang@linux.alibaba.com>
Fri, 29 Mar 2024 06:59:33 +0000 (14:59 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:56:31 +0000 (20:56 -0700)
Now the mTHP can also be split or added into the deferred list, so add
folio_test_pmd_mappable() validation for PMD mapped THP, to avoid
confusion with PMD mapped THP related statistics.

[baolin.wang@linux.alibaba.com: check THP earlier in case folio is split, per Lance]
Link: https://lkml.kernel.org/r/b99f8cb14bc85fdb6ab43721d1331cb5ebed2581.1713771041.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/a5341defeef27c9ac7b85c97f030f93e4368bbc1.1711694852.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Lance Yang <ioworker0@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index b757d9be310c8aee654b214a634ae3b0c023493c..4065bf8bfcc433433ce05d9a31e6c72e41083e52 100644 (file)
@@ -2934,6 +2934,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
        XA_STATE_ORDER(xas, &folio->mapping->i_pages, folio->index, new_order);
        struct anon_vma *anon_vma = NULL;
        struct address_space *mapping = NULL;
+       bool is_thp = folio_test_pmd_mappable(folio);
        int extra_pins, ret;
        pgoff_t end;
        bool is_hzp;
@@ -3112,7 +3113,8 @@ out_unlock:
                i_mmap_unlock_read(mapping);
 out:
        xas_destroy(&xas);
-       count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
+       if (is_thp)
+               count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
        return ret;
 }
 
@@ -3174,7 +3176,8 @@ void deferred_split_folio(struct folio *folio)
 
        spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
        if (list_empty(&folio->_deferred_list)) {
-               count_vm_event(THP_DEFERRED_SPLIT_PAGE);
+               if (folio_test_pmd_mappable(folio))
+                       count_vm_event(THP_DEFERRED_SPLIT_PAGE);
                list_add_tail(&folio->_deferred_list, &ds_queue->split_queue);
                ds_queue->split_queue_len++;
 #ifdef CONFIG_MEMCG