mm/huge_memory: page_try_dup_anon_rmap() -> folio_try_dup_anon_rmap_pmd()
authorDavid Hildenbrand <david@redhat.com>
Wed, 20 Dec 2023 22:44:59 +0000 (23:44 +0100)
committerAndrew Morton <akpm@linux-foundation.org>
Fri, 29 Dec 2023 19:58:55 +0000 (11:58 -0800)
Let's convert copy_huge_pmd() and fix up the comment in copy_huge_pud().
While at it, perform more folio conversion in copy_huge_pmd().

Link: https://lkml.kernel.org/r/20231220224504.646757-36-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/huge_memory.c

index ddc03421df0f97678b4709afcd09e310ac72b268..de623b942b6e4789f0b2e2439dc9821a7b6cd7b0 100644 (file)
@@ -1275,6 +1275,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 {
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
+       struct folio *src_folio;
        pmd_t pmd;
        pgtable_t pgtable = NULL;
        int ret = -ENOMEM;
@@ -1341,11 +1342,12 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
        src_page = pmd_page(pmd);
        VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
+       src_folio = page_folio(src_page);
 
-       get_page(src_page);
-       if (unlikely(page_try_dup_anon_rmap(src_page, true, src_vma))) {
+       folio_get(src_folio);
+       if (unlikely(folio_try_dup_anon_rmap_pmd(src_folio, src_page, src_vma))) {
                /* Page maybe pinned: split and retry the fault on PTEs. */
-               put_page(src_page);
+               folio_put(src_folio);
                pte_free(dst_mm, pgtable);
                spin_unlock(src_ptl);
                spin_unlock(dst_ptl);
@@ -1454,8 +1456,8 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        }
 
        /*
-        * TODO: once we support anonymous pages, use page_try_dup_anon_rmap()
-        * and split if duplicating fails.
+        * TODO: once we support anonymous pages, use
+        * folio_try_dup_anon_rmap_*() and split if duplicating fails.
         */
        pudp_set_wrprotect(src_mm, addr, src_pud);
        pud = pud_mkold(pud_wrprotect(pud));