mm: remove PageKsm()
author: Matthew Wilcox (Oracle) <willy@infradead.org>
Wed, 2 Oct 2024 15:25:31 +0000 (16:25 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Thu, 7 Nov 2024 04:11:08 +0000 (20:11 -0800)
All callers have been converted to use folio_test_ksm() or
PageAnonNotKsm(), so we can remove this wrapper.

Link: https://lkml.kernel.org/r/20241002152533.1350629-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alex Shi <alexs@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/page-flags.h
mm/internal.h
mm/ksm.c

index 1fcef06a2d3164bfccca4618a7f7115a0eeada07..e80665bc51fac556c996986be2955521313d88cb 100644 (file)
@@ -725,13 +725,8 @@ static __always_inline bool folio_test_ksm(const struct folio *folio)
        return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
                                PAGE_MAPPING_KSM;
 }
-
-static __always_inline bool PageKsm(const struct page *page)
-{
-       return folio_test_ksm(page_folio(page));
-}
 #else
-TESTPAGEFLAG_FALSE(Ksm, ksm)
+FOLIO_TEST_FLAG_FALSE(ksm)
 #endif
 
 u64 stable_page_flags(const struct page *page);
index 64c2eb0b160e169ab9134e3ab618d8a1d552d92c..fc2f523258a368d3654682fd2a9518ee75d61a41 100644 (file)
@@ -1356,7 +1356,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma,
                smp_rmb();
 
        /*
-        * Note that PageKsm() pages cannot be exclusive, and consequently,
+        * Note that KSM pages cannot be exclusive, and consequently,
         * cannot get pinned.
         */
        return !PageAnonExclusive(page);
index b1c5c8aff41b784acfd0582c5d5dc1a94c07fe20..556b8a8f37d04d69b88f5b8790e65ecd12b45df5 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -656,7 +656,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_v
         *
         * VM_FAULT_SIGBUS could occur if we race with truncation of the
         * backing file, which also invalidates anonymous pages: that's
-        * okay, that truncation will have unmapped the PageKsm for us.
+        * okay, that truncation will have unmapped the KSM page for us.
         *
         * VM_FAULT_OOM: at the time of writing (late July 2009), setting
         * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
@@ -1434,7 +1434,7 @@ out:
  * try_to_merge_one_page - take two pages and merge them into one
  * @vma: the vma that holds the pte pointing to page
  * @page: the PageAnon page that we want to replace with kpage
- * @kpage: the PageKsm page that we want to map instead of page,
+ * @kpage: the KSM page that we want to map instead of page,
  *         or NULL the first time when we want to use page as kpage.
  *
  * This function returns 0 if the pages were merged, -EFAULT otherwise.