mm: Add AS_UNMOVABLE to mark mapping as completely unmovable
author     Sean Christopherson <seanjc@google.com>
           Fri, 27 Oct 2023 18:21:56 +0000 (11:21 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 13 Nov 2023 10:31:38 +0000 (05:31 -0500)
Add an "unmovable" flag for mappings that cannot be migrated under any
circumstance.  KVM will use the flag for its upcoming GUEST_MEMFD support,
which will not support compaction/migration, at least not in the
foreseeable future.

Test AS_UNMOVABLE under folio lock as already done for the async
compaction/dirty folio case, as the mapping can be removed by truncation
while compaction is running.  To avoid having to lock every folio with a
mapping, assume/require that unmovable mappings are also unevictable, and
have mapping_set_unmovable() also set AS_UNEVICTABLE.
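
As a rough illustration only (this is editorial, not code from the patch; the
inode-setup path and function name are hypothetical), a backing store whose
folios must never be migrated would mark its mapping once when the inode is
created, using the helper added below:

  #include <linux/fs.h>
  #include <linux/pagemap.h>

  /* Hypothetical setup path: flag a freshly created inode's mapping. */
  static void example_mark_mapping_unmovable(struct inode *inode)
  {
          struct address_space *mapping = inode->i_mapping;

          /* Also sets AS_UNEVICTABLE; compaction relies on that pairing. */
          mapping_set_unmovable(mapping);
  }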

Cc: Matthew Wilcox <willy@infradead.org>
Co-developed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20231027182217.3615211-15-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
include/linux/pagemap.h
mm/compaction.c
mm/migrate.c

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 351c3b7f93a14ee3e7edafb17dbe128cd0e755f9..82c9bf506b79c0335eef23ffad73444bc3315125 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -203,7 +203,8 @@ enum mapping_flags {
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
        AS_LARGE_FOLIO_SUPPORT = 6,
-       AS_RELEASE_ALWAYS,      /* Call ->release_folio(), even if no private data */
+       AS_RELEASE_ALWAYS = 7,  /* Call ->release_folio(), even if no private data */
+       AS_UNMOVABLE    = 8,    /* The mapping cannot be moved, ever */
 };
 
 /**
@@ -289,6 +290,22 @@ static inline void mapping_clear_release_always(struct address_space *mapping)
        clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
 }
 
+static inline void mapping_set_unmovable(struct address_space *mapping)
+{
+       /*
+        * It's expected that unmovable mappings are also unevictable. The
+        * compaction migrate scanner (isolate_migratepages_block()) relies
+        * on this to reduce page locking.
+        */
+       set_bit(AS_UNEVICTABLE, &mapping->flags);
+       set_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
+static inline bool mapping_unmovable(struct address_space *mapping)
+{
+       return test_bit(AS_UNMOVABLE, &mapping->flags);
+}
+
 static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
        return mapping->gfp_mask;
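
A minimal sketch of the intended invariant (illustrative only, not part of the
diff; the helper name is made up): after mapping_set_unmovable(), the mapping
reads back as both unevictable and unmovable, which is exactly what the
compaction change below depends on.

  /* Hypothetical check; both warnings are expected to stay silent. */
  static void example_assert_unmovable_invariant(struct address_space *mapping)
  {
          mapping_set_unmovable(mapping);

          /* Unmovable implies unevictable. */
          WARN_ON_ONCE(!mapping_unevictable(mapping));
          WARN_ON_ONCE(!mapping_unmovable(mapping));
  }
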
diff --git a/mm/compaction.c b/mm/compaction.c
index 38c8d216c6a3bffd9d75fd430981558c66614750..12b828aed7c840083163e27651c4e0fd26c5b8a9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -883,6 +883,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
        /* Time to isolate some pages for migration */
        for (; low_pfn < end_pfn; low_pfn++) {
+               bool is_dirty, is_unevictable;
 
                if (skip_on_failure && low_pfn >= next_skip_pfn) {
                        /*
@@ -1080,8 +1081,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if (!folio_test_lru(folio))
                        goto isolate_fail_put;
 
+               is_unevictable = folio_test_unevictable(folio);
+
                /* Compaction might skip unevictable pages but CMA takes them */
-               if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
+               if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
                        goto isolate_fail_put;
 
                /*
@@ -1093,26 +1096,42 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
                if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
                        goto isolate_fail_put;
 
-               if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
-                       bool migrate_dirty;
+               is_dirty = folio_test_dirty(folio);
+
+               if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
+                   (mapping && is_unevictable)) {
+                       bool migrate_dirty = true;
+                       bool is_unmovable;
 
                        /*
                         * Only folios without mappings or that have
-                        * a ->migrate_folio callback are possible to
-                        * migrate without blocking.  However, we may
-                        * be racing with truncation, which can free
-                        * the mapping.  Truncation holds the folio lock
-                        * until after the folio is removed from the page
-                        * cache so holding it ourselves is sufficient.
+                        * a ->migrate_folio callback are possible to migrate
+                        * without blocking.
+                        *
+                        * Folios from unmovable mappings are not migratable.
+                        *
+                        * However, we can be racing with truncation, which can
+                        * free the mapping that we need to check. Truncation
+                        * holds the folio lock until after the folio is removed
+                        * from the page cache so holding it ourselves is sufficient.
+                        *
+                        * To avoid locking the folio just to check unmovable,
+                        * assume every unmovable folio is also unevictable,
+                        * which is a cheaper test.  If our assumption goes
+                        * wrong, it's not a correctness bug, just potentially
+                        * wasted cycles.
                         */
                        if (!folio_trylock(folio))
                                goto isolate_fail_put;
 
                        mapping = folio_mapping(folio);
-                       migrate_dirty = !mapping ||
-                                       mapping->a_ops->migrate_folio;
+                       if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
+                               migrate_dirty = !mapping ||
+                                               mapping->a_ops->migrate_folio;
+                       }
+                       is_unmovable = mapping && mapping_unmovable(mapping);
                        folio_unlock(folio);
-                       if (!migrate_dirty)
+                       if (!migrate_dirty || is_unmovable)
                                goto isolate_fail_put;
                }
 
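Distilled from the hunk above (a sketch under the same assumptions; the helper
name is hypothetical and does not exist in the patch): truncation can free the
mapping at any moment, so the AS_UNMOVABLE test must be made under the folio
lock, and the cheaper unevictable test is done first so movable folios never
need to be locked.

  static bool example_folio_in_unmovable_mapping(struct folio *folio)
  {
          struct address_space *mapping;
          bool unmovable;

          /* Cheap, lockless hint: unmovable mappings are also unevictable. */
          if (!folio_test_unevictable(folio))
                  return false;

          /* Truncation drops the folio lock only after removing the folio. */
          if (!folio_trylock(folio))
                  return true;    /* cannot check safely; assume the worst */

          mapping = folio_mapping(folio);
          unmovable = mapping && mapping_unmovable(mapping);
          folio_unlock(folio);

          return unmovable;
  }
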
diff --git a/mm/migrate.c b/mm/migrate.c
index 06086dc9da288f96debedb4582d9467a02563322..60f2ff6b36aa068b5ec4dca5cf8c154f335fc673 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -956,6 +956,8 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 
                if (!mapping)
                        rc = migrate_folio(mapping, dst, src, mode);
+               else if (mapping_unmovable(mapping))
+                       rc = -EOPNOTSUPP;
                else if (mapping->a_ops->migrate_folio)
                        /*
                         * Most folios have a mapping and most filesystems