mm: convert deferred_split_huge_page() to deferred_split_folio()
author Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 11 Jan 2023 14:29:13 +0000 (14:29 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Feb 2023 06:33:00 +0000 (22:33 -0800)
Now that both callers use a folio, pass the folio in and save a call to
compound_head().
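
A minimal sketch of the call-site shape this enables, with a made-up
caller name (example_partial_unmap() is hypothetical; the real callers
include page_remove_rmap(), see the mm/rmap.c hunk below).  This is an
illustration, not code from this patch:

    #include <linux/huge_mm.h>

    /* Hypothetical caller that, like the real ones, already holds a folio. */
    static void example_partial_unmap(struct folio *folio)
    {
            /*
             * Before: pass a page; the callee re-derived the folio with
             * page_folio(), i.e. an extra compound_head() call:
             *
             *      deferred_split_huge_page(&folio->page);
             */

            /* After: pass the folio the caller already has. */
            deferred_split_folio(folio);
    }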

Link: https://lkml.kernel.org/r/20230111142915.1001531-28-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Documentation/mm/transhuge.rst
include/linux/huge_mm.h
mm/huge_memory.c
mm/rmap.c

index 03bbd0a1904144a9f7f693854af56e08f232c6f4..a9608fe516499083aa9398d7e5fbab0393b287a7 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -153,8 +153,8 @@ clear where references should go after split: it will stay on the head page.
 Note that split_huge_pmd() doesn't have any limitations on refcounting:
 pmd can be split at any point and never fails.
 
-Partial unmap and deferred_split_huge_page()
-============================================
+Partial unmap and deferred_split_folio()
+========================================
 
 Unmapping part of THP (with munmap() or other way) is not going to free
 memory immediately. Instead, we detect that a subpage of THP is not in use
@@ -166,6 +166,6 @@ the place where we can detect partial unmap. It also might be
 counterproductive since in many cases partial unmap happens during exit(2) if
 a THP crosses a VMA boundary.
 
-The function deferred_split_huge_page() is used to queue a page for splitting.
+The function deferred_split_folio() is used to queue a folio for splitting.
 The splitting itself will happen when we get memory pressure via shrinker
 interface.
index b9978978a160965135b194de67f71e787691d972..70bd867eba9492eb6d3c24a68524d90fb4feba24 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -187,7 +187,7 @@ static inline int split_huge_page(struct page *page)
 {
        return split_huge_page_to_list(page, NULL);
 }
-void deferred_split_huge_page(struct page *page);
+void deferred_split_folio(struct folio *folio);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct folio *folio);
@@ -340,7 +340,7 @@ static inline int split_huge_page(struct page *page)
 {
        return 0;
 }
-static inline void deferred_split_huge_page(struct page *page) {}
+static inline void deferred_split_folio(struct folio *folio) {}
 #define split_huge_pmd(__vma, __pmd, __address)        \
        do { } while (0)
 
index c23b0e01734b312e33754f76f0de7dcc2b937de9..868fcccdff72874c6f58a533550bd7b64c4a7367 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2815,9 +2815,8 @@ void free_transhuge_page(struct page *page)
        free_compound_page(page);
 }
 
-void deferred_split_huge_page(struct page *page)
+void deferred_split_folio(struct folio *folio)
 {
-       struct folio *folio = page_folio(page);
        struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 #ifdef CONFIG_MEMCG
        struct mem_cgroup *memcg = folio_memcg(folio);
index 0020474f46c13448126f7c3c9c7bcbcac6a277c9..a079d9964b9c39976fb434c891ab080a5dad205b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1427,7 +1427,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
                 */
                if (folio_test_pmd_mappable(folio) && folio_test_anon(folio))
                        if (!compound || nr < nr_pmdmapped)
-                               deferred_split_huge_page(&folio->page);
+                               deferred_split_folio(folio);
        }
 
        /*