rmap: pass the folio to __page_check_anon_rmap()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 6 Jul 2023 19:52:51 +0000 (20:52 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 18 Aug 2023 17:12:12 +0000 (10:12 -0700)
The lone caller already has the folio, so pass it in instead of deriving
it from the page again.

Link: https://lkml.kernel.org/r/20230706195251.2707542-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/rmap.c

index 0c0d8857dfce47c279855e300acd56b6ca57a13a..2668f5ea353428dbceaf79b21ab9fe4baba52701 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1175,14 +1175,14 @@ out:
 
 /**
  * __page_check_anon_rmap - sanity check anonymous rmap addition
- * @page:      the page to add the mapping to
+ * @folio:     The folio containing @page.
+ * @page:      the page to check the mapping of
  * @vma:       the vm area in which the mapping is added
  * @address:   the user virtual address mapped
  */
-static void __page_check_anon_rmap(struct page *page,
+static void __page_check_anon_rmap(struct folio *folio, struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
-       struct folio *folio = page_folio(page);
        /*
         * The page's anon-rmap details (mapping and index) are guaranteed to
         * be set up correctly at this point.
@@ -1262,7 +1262,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
                        __page_set_anon_rmap(folio, page, vma, address,
                                             !!(flags & RMAP_EXCLUSIVE));
                else
-                       __page_check_anon_rmap(page, vma, address);
+                       __page_check_anon_rmap(folio, page, vma, address);
        }
 
        mlock_vma_folio(folio, vma, compound);