mm: pass migratetype into memmap_init_zone() and move_pfn_range_to_zone()
author David Hildenbrand <david@redhat.com>
Fri, 16 Oct 2020 03:08:19 +0000 (20:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 16 Oct 2020 18:11:17 +0000 (11:11 -0700)
On the memory onlining path, we want to start with MIGRATE_ISOLATE, so that
the pages cannot be allocated before onlining is complete, and un-isolate
them only once memory onlining has finished. Let's allow passing in the
migratetype.
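
To illustrate where this is headed: with the new parameter, a follow-up
patch in this series can online memory with all pageblocks isolated and
only expose the pages to the allocator at the very end. A simplified
sketch (not part of this patch; the exact code lands in a later patch):

	/* Associate the pfn range with the zone, starting out isolated. */
	zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	/* ... online the pages, update zone/node statistics ... */

	/* Only now make the pages available for allocation. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);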

Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Michel Lespinasse <walken@google.com>
Cc: Charan Teja Reddy <charante@codeaurora.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Link: https://lkml.kernel.org/r/20200819175957.28465-10-david@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/mm/init.c
include/linux/memory_hotplug.h
include/linux/mm.h
mm/memory_hotplug.c
mm/memremap.c
mm/page_alloc.c

diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d8686bf3ae2f685deb37fbdac22289ecbc6933cd..ef12e097f318453b257bef39642a3385e9e0526e 100644
@@ -537,7 +537,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
                                 args->nid, args->zone, page_to_pfn(map_start),
-                                MEMINIT_EARLY, NULL);
+                                MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        return 0;
 }
 
@@ -547,7 +547,7 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
 {
        if (!vmem_map) {
                memmap_init_zone(size, nid, zone, start_pfn,
-                                MEMINIT_EARLY, NULL);
+                                MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        } else {
                struct page *start;
                struct memmap_init_callback_data args;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 76b314031f090fe17492a621a0cf94c026778b0a..51a877fec8da8ccccec9308f2366d1fb42c49827 100644
@@ -351,7 +351,8 @@ extern int add_memory_resource(int nid, struct resource *resource);
 extern int add_memory_driver_managed(int nid, u64 start, u64 size,
                                     const char *resource_name);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
-               unsigned long nr_pages, struct vmem_altmap *altmap);
+                                  unsigned long nr_pages,
+                                  struct vmem_altmap *altmap, int migratetype);
 extern void remove_pfn_range_from_zone(struct zone *zone,
                                       unsigned long start_pfn,
                                       unsigned long nr_pages);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a9df46309e0731938d23c9124653aa75fff38f6a..61a2633fcc7ff08f7c62f9410f57c3e518739f41 100644
@@ -2440,7 +2440,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
-               enum meminit_context, struct vmem_altmap *);
+               enum meminit_context, struct vmem_altmap *, int migratetype);
 extern void setup_per_zone_wmarks(void);
 extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 113edf95b908be35a48122fe72174618c06d37ae..bb30e99b73832d4d7869851338cac3577f733545 100644
@@ -701,9 +701,14 @@ static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned lon
  * Associate the pfn range with the given zone, initializing the memmaps
  * and resizing the pgdat/zone data to span the added pages. After this
  * call, all affected pages are PG_reserved.
+ *
+ * All aligned pageblocks are initialized to the specified migratetype
+ * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
+ * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
-               unsigned long nr_pages, struct vmem_altmap *altmap)
+                                 unsigned long nr_pages,
+                                 struct vmem_altmap *altmap, int migratetype)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;
@@ -728,7 +733,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
         * are reserved so nobody should be touching them so we should be safe
         */
        memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
-                        MEMINIT_HOTPLUG, altmap);
+                        MEMINIT_HOTPLUG, altmap, migratetype);
 
        set_zone_contiguous(zone);
 }
@@ -808,7 +813,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
 
        /* associate pfn range with the zone */
        zone = zone_for_pfn_range(online_type, nid, pfn, nr_pages);
-       move_pfn_range_to_zone(zone, pfn, nr_pages, NULL);
+       move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE);
 
        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
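
Note the new kernel-doc above move_pfn_range_to_zone(): only the pageblock
migratetype is set; related zone stats such as nr_isolate_pageblock are left
untouched. A caller passing MIGRATE_ISOLATE therefore has to account for the
isolated pageblocks itself, roughly along these lines (a sketch of what the
follow-up patch does):

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	/*
	 * Fix up the number of isolated pageblocks ourselves, so that
	 * undo_isolate_page_range() works correctly later on.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);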
diff --git a/mm/memremap.c b/mm/memremap.c
index 198083453182f95f3221261ad3beed30c8213eac..73a206d0f64542e0f28a52da3b52ce3c7b4635ae 100644
@@ -266,7 +266,8 @@ static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
 
                zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
                move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
-                               PHYS_PFN(range_len(range)), params->altmap);
+                               PHYS_PFN(range_len(range)), params->altmap,
+                               MIGRATE_MOVABLE);
        }
 
        mem_hotplug_done();
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a99ed299443a578c77f23e4e34fa61327405324..f7f292f1d108bc00f5a8d3791b5703575de374a6 100644
@@ -5990,10 +5990,15 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
  * done. Non-atomic initialization, single-pass.
+ *
+ * All aligned pageblocks are initialized to the specified migratetype
+ * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
+ * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn, enum meminit_context context,
-               struct vmem_altmap *altmap)
+               unsigned long start_pfn,
+               enum meminit_context context,
+               struct vmem_altmap *altmap, int migratetype)
 {
        unsigned long pfn, end_pfn = start_pfn + size;
        struct page *page;
@@ -6037,14 +6042,12 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                        __SetPageReserved(page);
 
                /*
-                * Mark the block movable so that blocks are reserved for
-                * movable at startup. This will force kernel allocations
-                * to reserve their blocks rather than leaking throughout
-                * the address space during boot when many long-lived
-                * kernel allocations are made.
+                * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
+                * such that unmovable allocations won't be scattered all
+                * over the place during system boot.
                 */
                if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
-                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+                       set_pageblock_migratetype(page, migratetype);
                        cond_resched();
                }
                pfn++;
@@ -6144,7 +6147,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
                if (end_pfn > start_pfn) {
                        size = end_pfn - start_pfn;
                        memmap_init_zone(size, nid, zone, start_pfn,
-                                        MEMINIT_EARLY, NULL);
+                                        MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
                }
        }
 }
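
As the updated comment in memmap_init_zone() spells out, the migratetype is
recorded only at pageblock-aligned pfns: it is tracked per pageblock, not
per page. A minimal sketch of that relationship, using the existing
pageblock helpers:

	/* The migratetype is recorded once for the whole pageblock ... */
	set_pageblock_migratetype(page, migratetype);

	/* ... and is reported back for any page inside that block. */
	int mt = get_pageblock_migratetype(page + pageblock_nr_pages - 1);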