mm: remember young/dirty bit for page migrations
author Peter Xu <peterx@redhat.com>
Thu, 11 Aug 2022 16:13:29 +0000 (12:13 -0400)
committer Andrew Morton <akpm@linux-foundation.org>
Tue, 27 Sep 2022 02:46:05 +0000 (19:46 -0700)
When page migration happens, we always ignore the young/dirty bit settings
in the old pgtable, and mark the page as old in the new page table using
either pte_mkold() or pmd_mkold(), while keeping the pte clean.

That's fine functionally, but it is not friendly to page reclaim because
the page being moved can be actively accessed during the procedure.  Not
to mention that having the hardware set the young bit again can bring
quite some overhead on some systems, e.g.  x86_64 needs a few hundred
nanoseconds to set the bit.  The same slowdown applies to the dirty bit
when the memory is first written after the migration.

Actually we can easily remember the A/D bit configuration and recover the
information after the page is migrated.  To achieve it, define a new set
of bits in the migration swap offset field to cache the A/D bits of the
old pte.  Then when removing/recovering the migration entry, we can
recover the A/D bits even though the underlying page has changed.
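
For illustration only, the idea amounts to reserving two bits directly
above the PFN field inside a migration entry's swap offset.  Below is a
minimal, self-contained userspace sketch of the encode/decode round trip;
it is not the kernel code, and the 40-bit PFN width is a made-up example
(the real width is arch specific):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PFN_BITS	40ULL				/* illustrative width */
	#define MIG_YOUNG	(1ULL << PFN_BITS)		/* "A" bit */
	#define MIG_DIRTY	(1ULL << (PFN_BITS + 1))	/* "D" bit */

	/* Stash the old pgtable A/D bits right above the PFN */
	static uint64_t mig_encode(uint64_t pfn, bool young, bool dirty)
	{
		uint64_t off = pfn;

		if (young)
			off |= MIG_YOUNG;
		if (dirty)
			off |= MIG_DIRTY;
		return off;
	}

	int main(void)
	{
		uint64_t off = mig_encode(0x12345, true, false);

		/* When removing the migration entry, recover A/D and PFN */
		printf("young=%d dirty=%d pfn=0x%llx\n",
		       !!(off & MIG_YOUNG), !!(off & MIG_DIRTY),
		       (unsigned long long)(off & (MIG_YOUNG - 1)));
		return 0;
	}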

One thing to mention is that we use max_swapfile_size() to detect how
many swp offset bits are available, and only enable this feature when the
swp offset is big enough to store both the PFN value and the A/D bits.
Otherwise the A/D bits are dropped as before.

Link: https://lkml.kernel.org/r/20220811161331.37055-6-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andi Kleen <andi.kleen@intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/swapops.h
mm/huge_memory.c
mm/migrate.c
mm/migrate_device.c
mm/rmap.c

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 578212fbf2be9b4321b32bc40f7d74da14f687f9..11b874f212a20dcf24e80595624ba355a04e2b66 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -8,6 +8,10 @@
 
 #ifdef CONFIG_MMU
 
+#ifdef CONFIG_SWAP
+#include <linux/swapfile.h>
+#endif /* CONFIG_SWAP */
+
 /*
  * swapcache pages are stored in the swapper_space radix tree.  We want to
  * get good packing density in that tree, so the index should be dense in
 #endif /* MAX_PHYSMEM_BITS */
 #define SWP_PFN_MASK                   (BIT(SWP_PFN_BITS) - 1)
 
+/**
+ * Migration swap entry specific bitfield definitions.  Layout:
+ *
+ *   |----------+--------------------|
+ *   | swp_type | swp_offset         |
+ *   |----------+--------+-+-+-------|
+ *   |          | resv   |D|A|  PFN  |
+ *   |----------+--------+-+-+-------|
+ *
+ * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
+ * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
+ *
+ * Note: A/D bits will be stored in migration entries iff there're enough
+ * free bits in arch specific swp offset.  By default we'll ignore A/D bits
+ * when migrating a page.  Please refer to migration_entry_supports_ad()
+ * for more information.  If there're more bits besides PFN and A/D bits,
+ * they should be reserved and always be zeros.
+ */
+#define SWP_MIG_YOUNG_BIT              (SWP_PFN_BITS)
+#define SWP_MIG_DIRTY_BIT              (SWP_PFN_BITS + 1)
+#define SWP_MIG_TOTAL_BITS             (SWP_PFN_BITS + 2)
+
+#define SWP_MIG_YOUNG                  BIT(SWP_MIG_YOUNG_BIT)
+#define SWP_MIG_DIRTY                  BIT(SWP_MIG_DIRTY_BIT)
+
 static inline bool is_pfn_swap_entry(swp_entry_t entry);
 
 /* Clear all flags but only keep swp_entry_t related information */
@@ -265,6 +294,57 @@ static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
        return swp_entry(SWP_MIGRATION_WRITE, offset);
 }
 
+/*
+ * Returns whether the host has large enough swap offset field to support
+ * carrying over pgtable A/D bits for page migrations.  The result is
+ * pretty much arch specific.
+ */
+static inline bool migration_entry_supports_ad(void)
+{
+       /*
+        * max_swapfile_size() returns the max supported swp-offset plus 1.
+        * We can support the migration A/D bits iff the pfn swap entry has
+        * the offset large enough to cover all of them (PFN, A & D bits).
+        */
+#ifdef CONFIG_SWAP
+       return max_swapfile_size() >= (1UL << SWP_MIG_TOTAL_BITS);
+#else  /* CONFIG_SWAP */
+       return false;
+#endif /* CONFIG_SWAP */
+}
+
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+       if (migration_entry_supports_ad())
+               return swp_entry(swp_type(entry),
+                                swp_offset(entry) | SWP_MIG_YOUNG);
+       return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+       if (migration_entry_supports_ad())
+               return swp_offset(entry) & SWP_MIG_YOUNG;
+       /* Keep the old behavior of aging page after migration */
+       return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+       if (migration_entry_supports_ad())
+               return swp_entry(swp_type(entry),
+                                swp_offset(entry) | SWP_MIG_DIRTY);
+       return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+       if (migration_entry_supports_ad())
+               return swp_offset(entry) & SWP_MIG_DIRTY;
+       /* Keep the old behavior of clean page after migration */
+       return false;
+}
+
 extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
                                        spinlock_t *ptl);
 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
@@ -311,6 +391,25 @@ static inline int is_readable_migration_entry(swp_entry_t entry)
        return 0;
 }
 
+static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
+{
+       return entry;
+}
+
+static inline bool is_migration_entry_young(swp_entry_t entry)
+{
+       return false;
+}
+
+static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
+{
+       return entry;
+}
+
+static inline bool is_migration_entry_dirty(swp_entry_t entry)
+{
+       return false;
+}
 #endif /* CONFIG_MIGRATION */
 
 typedef unsigned long pte_marker;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b4666774abf08da3289eb3c7df39f8abfce4c278..f4a656b279b1b0f49280bd3cce1962052351a0e6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2121,7 +2121,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                write = is_writable_migration_entry(entry);
                if (PageAnon(page))
                        anon_exclusive = is_readable_exclusive_migration_entry(entry);
-               young = false;
+               young = is_migration_entry_young(entry);
+               dirty = is_migration_entry_dirty(entry);
                soft_dirty = pmd_swp_soft_dirty(old_pmd);
                uffd_wp = pmd_swp_uffd_wp(old_pmd);
        } else {
@@ -2183,6 +2184,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        else
                                swp_entry = make_readable_migration_entry(
                                                        page_to_pfn(page + i));
+                       if (young)
+                               swp_entry = make_migration_entry_young(swp_entry);
+                       if (dirty)
+                               swp_entry = make_migration_entry_dirty(swp_entry);
                        entry = swp_entry_to_pte(swp_entry);
                        if (soft_dirty)
                                entry = pte_swp_mksoft_dirty(entry);
@@ -3201,6 +3206,10 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
        else
                entry = make_readable_migration_entry(page_to_pfn(page));
+       if (pmd_young(pmdval))
+               entry = make_migration_entry_young(entry);
+       if (pmd_dirty(pmdval))
+               entry = make_migration_entry_dirty(entry);
        pmdswp = swp_entry_to_pmd(entry);
        if (pmd_soft_dirty(pmdval))
                pmdswp = pmd_swp_mksoft_dirty(pmdswp);
@@ -3226,13 +3235,18 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
 
        entry = pmd_to_swp_entry(*pvmw->pmd);
        get_page(new);
-       pmde = pmd_mkold(mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot)));
+       pmde = mk_huge_pmd(new, READ_ONCE(vma->vm_page_prot));
        if (pmd_swp_soft_dirty(*pvmw->pmd))
                pmde = pmd_mksoft_dirty(pmde);
        if (is_writable_migration_entry(entry))
                pmde = maybe_pmd_mkwrite(pmde, vma);
        if (pmd_swp_uffd_wp(*pvmw->pmd))
                pmde = pmd_wrprotect(pmd_mkuffd_wp(pmde));
+       if (!is_migration_entry_young(entry))
+               pmde = pmd_mkold(pmde);
+       /* NOTE: this may contain setting soft-dirty on some archs */
+       if (PageDirty(new) && is_migration_entry_dirty(entry))
+               pmde = pmd_mkdirty(pmde);
 
        if (PageAnon(new)) {
                rmap_t rmap_flags = RMAP_COMPOUND;
diff --git a/mm/migrate.c b/mm/migrate.c
index ce6a58f3b21f11262b0cfe050cf499425cfe38ba..a35eba462e61f51e85679bda604a61315642aa95 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -198,7 +198,7 @@ static bool remove_migration_pte(struct folio *folio,
 #endif
 
                folio_get(folio);
-               pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
+               pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
                if (pte_swp_soft_dirty(*pvmw.pte))
                        pte = pte_mksoft_dirty(pte);
 
@@ -206,6 +206,10 @@ static bool remove_migration_pte(struct folio *folio,
                 * Recheck VMA as permissions can change since migration started
                 */
                entry = pte_to_swp_entry(*pvmw.pte);
+               if (!is_migration_entry_young(entry))
+                       pte = pte_mkold(pte);
+               if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+                       pte = pte_mkdirty(pte);
                if (is_writable_migration_entry(entry))
                        pte = maybe_mkwrite(pte, vma);
                else if (pte_swp_uffd_wp(*pvmw.pte))
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index d8efd5a0eb405daa2a85de5abed7639813f6cd15..5ab6ab9d2ed82be3a231e31746571f0c98eeee92 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -233,6 +233,12 @@ again:
                        else
                                entry = make_readable_migration_entry(
                                                        page_to_pfn(page));
+                       if (pte_present(pte)) {
+                               if (pte_young(pte))
+                                       entry = make_migration_entry_young(entry);
+                               if (pte_dirty(pte))
+                                       entry = make_migration_entry_dirty(entry);
+                       }
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_present(pte)) {
                                if (pte_soft_dirty(pte))
diff --git a/mm/rmap.c b/mm/rmap.c
index 6781f693df50947debc35bfd6038f234d7c6c97c..131def40e4f0207015fa7a74da9d54926078206e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2066,7 +2066,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        else
                                entry = make_readable_migration_entry(
                                                        page_to_pfn(subpage));
-
+                       if (pte_young(pteval))
+                               entry = make_migration_entry_young(entry);
+                       if (pte_dirty(pteval))
+                               entry = make_migration_entry_dirty(entry);
                        swp_pte = swp_entry_to_pte(entry);
                        if (pte_soft_dirty(pteval))
                                swp_pte = pte_swp_mksoft_dirty(swp_pte);