mm: khugepaged: convert set_huge_pmd() to take a folio
author: Baolin Wang <baolin.wang@linux.alibaba.com>
Mon, 12 May 2025 02:57:11 +0000 (10:57 +0800)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 22 May 2025 21:55:37 +0000 (14:55 -0700)
We've already gotten the stable locked folio in collapse_pte_mapped_thp(),
so just use folio for set_huge_pmd() to set the PMD entry, which is more
straightforward.

Moreover, since do_set_pmd() already checks the folio size, we can remove
the now-unnecessary VM_BUG_ON() in set_huge_pmd().  While we are at it, we
can also remove PageTransHuge(), as it no longer has any callers.

Link: https://lkml.kernel.org/r/110c3e1ec5fe7854a0e2c95ffcbc985817180ed7.1747017104.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/khugepaged.c

index 37b11f15dbd9f91dae473b68dd44dc34953a8eaa..1c1d49554c71acb43ae36b2f64f053afe2cce3c2 100644 (file)
@@ -907,20 +907,6 @@ FOLIO_FLAG_FALSE(partially_mapped)
 #define PG_head_mask ((1UL << PG_head))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/*
- * PageHuge() only returns true for hugetlbfs pages, but not for
- * normal or transparent huge pages.
- *
- * PageTransHuge() returns true for both transparent huge and
- * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
- * called only in the core VM paths where hugetlbfs pages can't exist.
- */
-static inline int PageTransHuge(const struct page *page)
-{
-       VM_BUG_ON_PAGE(PageTail(page), page);
-       return PageHead(page);
-}
-
 /*
  * PageTransCompound returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
@@ -931,7 +917,6 @@ static inline int PageTransCompound(const struct page *page)
        return PageCompound(page);
 }
 #else
-TESTPAGEFLAG_FALSE(TransHuge, transhuge)
 TESTPAGEFLAG_FALSE(TransCompound, transcompound)
 #endif
 
index b04b6a770afe3d3a3dae234f96fb38f326b777af..33daea8f667e888e813715a62afb1649e4b8c1b1 100644 (file)
@@ -1465,9 +1465,9 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
 }
 
 #ifdef CONFIG_SHMEM
-/* hpage must be locked, and mmap_lock must be held */
+/* folio must be locked, and mmap_lock must be held */
 static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
-                       pmd_t *pmdp, struct page *hpage)
+                       pmd_t *pmdp, struct folio *folio, struct page *page)
 {
        struct vm_fault vmf = {
                .vma = vma,
@@ -1476,13 +1476,12 @@ static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
                .pmd = pmdp,
        };
 
-       VM_BUG_ON(!PageTransHuge(hpage));
        mmap_assert_locked(vma->vm_mm);
 
-       if (do_set_pmd(&vmf, hpage))
+       if (do_set_pmd(&vmf, page))
                return SCAN_FAIL;
 
-       get_page(hpage);
+       folio_get(folio);
        return SCAN_SUCCEED;
 }
 
@@ -1689,7 +1688,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 maybe_install_pmd:
        /* step 5: install pmd entry */
        result = install_pmd
-                       ? set_huge_pmd(vma, haddr, pmd, &folio->page)
+                       ? set_huge_pmd(vma, haddr, pmd, folio, &folio->page)
                        : SCAN_SUCCEED;
        goto drop_folio;
 abort: