// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");

static inline bool mhp_memmap_on_memory(void)
{
	return memmap_on_memory;
}
#else
static inline bool mhp_memmap_on_memory(void)
{
	return false;
}
#endif
a08a2ae3 62
e83a437f
DH
63enum {
64 ONLINE_POLICY_CONTIG_ZONES = 0,
65 ONLINE_POLICY_AUTO_MOVABLE,
66};
67
ac62554b 68static const char * const online_policy_to_str[] = {
e83a437f
DH
69 [ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
70 [ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
71};
72
73static int set_online_policy(const char *val, const struct kernel_param *kp)
74{
75 int ret = sysfs_match_string(online_policy_to_str, val);
76
77 if (ret < 0)
78 return ret;
79 *((int *)kp->arg) = ret;
80 return 0;
81}
82
83static int get_online_policy(char *buffer, const struct kernel_param *kp)
84{
85 return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
86}
87
88/*
89 * memory_hotplug.online_policy: configure online behavior when onlining without
90 * specifying a zone (MMOP_ONLINE)
91 *
92 * "contig-zones": keep zone contiguous
93 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
94 * (auto_movable_ratio, auto_movable_numa_aware) allows for it
95 */
96static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
97static const struct kernel_param_ops online_policy_ops = {
98 .set = set_online_policy,
99 .get = get_online_policy,
100};
101module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
102MODULE_PARM_DESC(online_policy,
103 "Set the online policy (\"contig-zones\", \"auto-movable\") "
104 "Default: \"contig-zones\"");
105
/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide not to
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
113static unsigned int auto_movable_ratio __read_mostly = 301;
114module_param(auto_movable_ratio, uint, 0644);
115MODULE_PARM_DESC(auto_movable_ratio,
116 "Set the maximum ratio of MOVABLE:KERNEL memory in the system "
117 "in percent for \"auto-movable\" online policy. Default: 301");
118
119/*
120 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
121 */
122#ifdef CONFIG_NUMA
123static bool auto_movable_numa_aware __read_mostly = true;
124module_param(auto_movable_numa_aware, bool, 0644);
125MODULE_PARM_DESC(auto_movable_numa_aware,
126 "Consider numa node stats in addition to global stats in "
127 "\"auto-movable\" online policy. Default: true");
128#endif /* CONFIG_NUMA */
129
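/*
 * Example (illustrative): these parameters are typically set on the kernel
 * command line, e.g.:
 *
 *   memory_hotplug.online_policy=auto-movable
 *   memory_hotplug.auto_movable_ratio=301
 *
 * or, for the 0644 parameters, changed at runtime via
 * /sys/module/memory_hotplug/parameters/.
 */
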
/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */
136
9d0ad8ca 137static online_page_callback_t online_page_callback = generic_online_page;
bfc8c901 138static DEFINE_MUTEX(online_page_callback_lock);
9d0ad8ca 139
3f906ba2 140DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);
bfc8c901 141
3f906ba2
TG
142void get_online_mems(void)
143{
144 percpu_down_read(&mem_hotplug_lock);
145}
bfc8c901 146
3f906ba2
TG
147void put_online_mems(void)
148{
149 percpu_up_read(&mem_hotplug_lock);
150}
bfc8c901 151
4932381e
MH
152bool movable_node_enabled = false;
153
8604d9e5 154#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
1adf8b46 155int mhp_default_online_type = MMOP_OFFLINE;
8604d9e5 156#else
1adf8b46 157int mhp_default_online_type = MMOP_ONLINE;
8604d9e5 158#endif
31bc3858 159
86dd995d
VK
160static int __init setup_memhp_default_state(char *str)
161{
1adf8b46 162 const int online_type = mhp_online_type_from_str(str);
5f47adf7
DH
163
164 if (online_type >= 0)
1adf8b46 165 mhp_default_online_type = online_type;
86dd995d
VK
166
167 return 1;
168}
169__setup("memhp_default_state=", setup_memhp_default_state);
170
30467e0b 171void mem_hotplug_begin(void)
20d6c96b 172{
3f906ba2
TG
173 cpus_read_lock();
174 percpu_down_write(&mem_hotplug_lock);
20d6c96b
KM
175}
176
30467e0b 177void mem_hotplug_done(void)
bfc8c901 178{
3f906ba2
TG
179 percpu_up_write(&mem_hotplug_lock);
180 cpus_read_unlock();
bfc8c901 181}
20d6c96b 182
357b4da5
JG
183u64 max_mem_size = U64_MAX;
184
45e0b78b 185/* add this memory to iomem resource */
7b7b2721
DH
186static struct resource *register_memory_resource(u64 start, u64 size,
187 const char *resource_name)
45e0b78b 188{
2794129e
DH
189 struct resource *res;
190 unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
7b7b2721
DH
191
192 if (strcmp(resource_name, "System RAM"))
7cf603d1 193 flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;
357b4da5 194
bca3feaa
AK
195 if (!mhp_range_allowed(start, size, true))
196 return ERR_PTR(-E2BIG);
197
f3cd4c86
BH
198 /*
199 * Make sure value parsed from 'mem=' only restricts memory adding
200 * while booting, so that memory hotplug won't be impacted. Please
201 * refer to document of 'mem=' in kernel-parameters.txt for more
202 * details.
203 */
204 if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
357b4da5
JG
205 return ERR_PTR(-E2BIG);
206
2794129e
DH
207 /*
208 * Request ownership of the new memory range. This might be
209 * a child of an existing resource that was present but
210 * not marked as busy.
211 */
212 res = __request_region(&iomem_resource, start, size,
213 resource_name, flags);
214
215 if (!res) {
216 pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
217 start, start + size);
6f754ba4 218 return ERR_PTR(-EEXIST);
45e0b78b
KM
219 }
220 return res;
221}
222
223static void release_memory_resource(struct resource *res)
224{
225 if (!res)
226 return;
227 release_resource(res);
228 kfree(res);
45e0b78b
KM
229}
230
943189db 231static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
7ea62160
DW
232{
233 /*
234 * Disallow all operations smaller than a sub-section and only
235 * allow operations smaller than a section for
236 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
237 * enforces a larger memory_block_size_bytes() granularity for
238 * memory that will be marked online, so this check should only
239 * fire for direct arch_{add,remove}_memory() users outside of
240 * add_memory_resource().
241 */
242 unsigned long min_align;
243
244 if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
245 min_align = PAGES_PER_SUBSECTION;
246 else
247 min_align = PAGES_PER_SECTION;
943189db 248 if (!IS_ALIGNED(pfn | nr_pages, min_align))
7ea62160 249 return -EINVAL;
7ea62160
DW
250 return 0;
251}
252
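/*
 * Example (illustrative): with CONFIG_SPARSEMEM_VMEMMAP, pfn and nr_pages
 * only need sub-section alignment (PAGES_PER_SUBSECTION, i.e. 2 MiB worth
 * of pages on x86-64), so a 2 MiB-aligned device-memory range passes;
 * without VMEMMAP, anything smaller or less aligned than a full section
 * (128 MiB on x86-64) makes the caller fail with -EINVAL.
 */
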
9f605f26
DW
253/*
254 * Return page for the valid pfn only if the page is online. All pfn
255 * walkers which rely on the fully initialized page->flags and others
256 * should use this rather than pfn_valid && pfn_to_page
257 */
258struct page *pfn_to_online_page(unsigned long pfn)
259{
260 unsigned long nr = pfn_to_section_nr(pfn);
1f90a347 261 struct dev_pagemap *pgmap;
9f9b02e5
DW
262 struct mem_section *ms;
263
264 if (nr >= NR_MEM_SECTIONS)
265 return NULL;
266
267 ms = __nr_to_section(nr);
268 if (!online_section(ms))
269 return NULL;
270
271 /*
272 * Save some code text when online_section() +
273 * pfn_section_valid() are sufficient.
274 */
275 if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
276 return NULL;
277
278 if (!pfn_section_valid(ms, pfn))
279 return NULL;
9f605f26 280
1f90a347
DW
281 if (!online_device_section(ms))
282 return pfn_to_page(pfn);
283
284 /*
285 * Slowpath: when ZONE_DEVICE collides with
286 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
287 * the section may be 'offline' but 'valid'. Only
288 * get_dev_pagemap() can determine sub-section online status.
289 */
290 pgmap = get_dev_pagemap(pfn, NULL);
291 put_dev_pagemap(pgmap);
292
293 /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
294 if (pgmap)
295 return NULL;
296
9f9b02e5 297 return pfn_to_page(pfn);
9f605f26
DW
298}
299EXPORT_SYMBOL_GPL(pfn_to_online_page);
300
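/*
 * Usage sketch (illustrative, not part of this file): pfn walkers that rely
 * on fully initialized struct pages should prefer pfn_to_online_page() over
 * pfn_valid()/pfn_to_page(), roughly:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		...
 *	}
 */
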
7ea62160 301int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
f5637d3b 302 struct mhp_params *params)
4edd7cef 303{
6cdd0b30
DH
304 const unsigned long end_pfn = pfn + nr_pages;
305 unsigned long cur_nr_pages;
9a845030 306 int err;
f5637d3b 307 struct vmem_altmap *altmap = params->altmap;
4b94ffdc 308
6366238b 309 if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
bfeb022f
LG
310 return -EINVAL;
311
bca3feaa 312 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
dca4436d 313
4b94ffdc
DW
314 if (altmap) {
315 /*
316 * Validate altmap is within bounds of the total request
317 */
7ea62160 318 if (altmap->base_pfn != pfn
4b94ffdc
DW
319 || vmem_altmap_offset(altmap) > nr_pages) {
320 pr_warn_once("memory add fail, invalid altmap\n");
7ea62160 321 return -EINVAL;
4b94ffdc
DW
322 }
323 altmap->alloc = 0;
324 }
325
943189db 326 if (check_pfn_span(pfn, nr_pages)) {
50135045 327 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
943189db
AK
328 return -EINVAL;
329 }
7ea62160 330
6cdd0b30
DH
331 for (; pfn < end_pfn; pfn += cur_nr_pages) {
332 /* Select all remaining pages up to the next section boundary */
333 cur_nr_pages = min(end_pfn - pfn,
334 SECTION_ALIGN_UP(pfn + 1) - pfn);
e3246d8f
JM
335 err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
336 params->pgmap);
ba72b4c8
DW
337 if (err)
338 break;
f64ac5e6 339 cond_resched();
4edd7cef 340 }
c435a390 341 vmemmap_populate_print_last();
4edd7cef
DR
342 return err;
343}
4edd7cef 344
815121d2 345/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
d09b0137 346static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
815121d2
YI
347 unsigned long start_pfn,
348 unsigned long end_pfn)
349{
49ba3c6b 350 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
7ce700bf 351 if (unlikely(!pfn_to_online_page(start_pfn)))
815121d2
YI
352 continue;
353
354 if (unlikely(pfn_to_nid(start_pfn) != nid))
355 continue;
356
9b05158f 357 if (zone != page_zone(pfn_to_page(start_pfn)))
815121d2
YI
358 continue;
359
360 return start_pfn;
361 }
362
363 return 0;
364}
365
366/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
d09b0137 367static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
815121d2
YI
368 unsigned long start_pfn,
369 unsigned long end_pfn)
370{
815121d2
YI
371 unsigned long pfn;
372
373 /* pfn is the end pfn of a memory section. */
374 pfn = end_pfn - 1;
49ba3c6b 375 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
7ce700bf 376 if (unlikely(!pfn_to_online_page(pfn)))
815121d2
YI
377 continue;
378
379 if (unlikely(pfn_to_nid(pfn) != nid))
380 continue;
381
9b05158f 382 if (zone != page_zone(pfn_to_page(pfn)))
815121d2
YI
383 continue;
384
385 return pfn;
386 }
387
388 return 0;
389}
390
391static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
392 unsigned long end_pfn)
393{
815121d2 394 unsigned long pfn;
815121d2
YI
395 int nid = zone_to_nid(zone);
396
5d12071c 397 if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
404 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
5d12071c 405 zone_end_pfn(zone));
815121d2 406 if (pfn) {
5d12071c 407 zone->spanned_pages = zone_end_pfn(zone) - pfn;
815121d2 408 zone->zone_start_pfn = pfn;
950b68d9
DH
409 } else {
410 zone->zone_start_pfn = 0;
411 zone->spanned_pages = 0;
815121d2 412 }
5d12071c 413 } else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section for
		 * shrinking the zone.
		 */
5d12071c 420 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
815121d2
YI
421 start_pfn);
422 if (pfn)
5d12071c 423 zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
950b68d9
DH
424 else {
425 zone->zone_start_pfn = 0;
426 zone->spanned_pages = 0;
427 }
815121d2 428 }
815121d2
YI
429}
430
00d6c019 431static void update_pgdat_span(struct pglist_data *pgdat)
815121d2 432{
00d6c019
DH
433 unsigned long node_start_pfn = 0, node_end_pfn = 0;
434 struct zone *zone;
435
436 for (zone = pgdat->node_zones;
437 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
6c922cf7 438 unsigned long end_pfn = zone_end_pfn(zone);
00d6c019
DH
439
440 /* No need to lock the zones, they can't change. */
656d5711
DH
441 if (!zone->spanned_pages)
442 continue;
443 if (!node_end_pfn) {
444 node_start_pfn = zone->zone_start_pfn;
6c922cf7 445 node_end_pfn = end_pfn;
656d5711
DH
446 continue;
447 }
448
6c922cf7
ML
449 if (end_pfn > node_end_pfn)
450 node_end_pfn = end_pfn;
00d6c019
DH
451 if (zone->zone_start_pfn < node_start_pfn)
452 node_start_pfn = zone->zone_start_pfn;
815121d2
YI
453 }
454
00d6c019
DH
455 pgdat->node_start_pfn = node_start_pfn;
456 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
815121d2
YI
457}
458
feee6b29
DH
459void __ref remove_pfn_range_from_zone(struct zone *zone,
460 unsigned long start_pfn,
461 unsigned long nr_pages)
815121d2 462{
b7e3debd 463 const unsigned long end_pfn = start_pfn + nr_pages;
815121d2 464 struct pglist_data *pgdat = zone->zone_pgdat;
27cacaad 465 unsigned long pfn, cur_nr_pages;
815121d2 466
d33695b1 467 /* Poison struct pages because they are now uninitialized again. */
b7e3debd
BW
468 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
469 cond_resched();
470
471 /* Select all remaining pages up to the next section boundary */
472 cur_nr_pages =
473 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
474 page_init_poison(pfn_to_page(pfn),
475 sizeof(struct page) * cur_nr_pages);
476 }
d33695b1 477
7ce700bf
DH
478 /*
479 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
480 * we will not try to shrink the zones - which is okay as
481 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
482 */
5ef5f810 483 if (zone_is_zone_device(zone))
7ce700bf 484 return;
7ce700bf 485
feee6b29
DH
486 clear_zone_contiguous(zone);
487
815121d2 488 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
00d6c019 489 update_pgdat_span(pgdat);
feee6b29
DH
490
491 set_zone_contiguous(zone);
815121d2
YI
492}
493
/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
feee6b29
DH
505void __remove_pages(unsigned long pfn, unsigned long nr_pages,
506 struct vmem_altmap *altmap)
ea01ea93 507{
52fb87c8
DH
508 const unsigned long end_pfn = pfn + nr_pages;
509 unsigned long cur_nr_pages;
ea01ea93 510
943189db 511 if (check_pfn_span(pfn, nr_pages)) {
50135045 512 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
7ea62160 513 return;
943189db 514 }
ea01ea93 515
52fb87c8 516 for (; pfn < end_pfn; pfn += cur_nr_pages) {
dd33ad7b 517 cond_resched();
52fb87c8 518 /* Select all remaining pages up to the next section boundary */
a11b9419
DH
519 cur_nr_pages = min(end_pfn - pfn,
520 SECTION_ALIGN_UP(pfn + 1) - pfn);
bd5f79ab 521 sparse_remove_section(pfn, cur_nr_pages, altmap);
ea01ea93 522 }
ea01ea93 523}
ea01ea93 524
9d0ad8ca
DK
525int set_online_page_callback(online_page_callback_t callback)
526{
527 int rc = -EINVAL;
528
bfc8c901
VD
529 get_online_mems();
530 mutex_lock(&online_page_callback_lock);
9d0ad8ca
DK
531
532 if (online_page_callback == generic_online_page) {
533 online_page_callback = callback;
534 rc = 0;
535 }
536
bfc8c901
VD
537 mutex_unlock(&online_page_callback_lock);
538 put_online_mems();
9d0ad8ca
DK
539
540 return rc;
541}
542EXPORT_SYMBOL_GPL(set_online_page_callback);
543
544int restore_online_page_callback(online_page_callback_t callback)
545{
546 int rc = -EINVAL;
547
bfc8c901
VD
548 get_online_mems();
549 mutex_lock(&online_page_callback_lock);
9d0ad8ca
DK
550
551 if (online_page_callback == callback) {
552 online_page_callback = generic_online_page;
553 rc = 0;
554 }
555
bfc8c901
VD
556 mutex_unlock(&online_page_callback_lock);
557 put_online_mems();
9d0ad8ca
DK
558
559 return rc;
560}
561EXPORT_SYMBOL_GPL(restore_online_page_callback);
562
18db1491 563void generic_online_page(struct page *page, unsigned int order)
9d0ad8ca 564{
c87cbc1f
VB
565 /*
566 * Freeing the page with debug_pagealloc enabled will try to unmap it,
567 * so we should map it first. This is better than introducing a special
568 * case in page freeing fast path.
569 */
77bc7fd6 570 debug_pagealloc_map_pages(page, 1 << order);
a9cd410a
AK
571 __free_pages_core(page, order);
572 totalram_pages_add(1UL << order);
a9cd410a 573}
18db1491 574EXPORT_SYMBOL_GPL(generic_online_page);
a9cd410a 575
aac65321 576static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
3947be19 577{
b2c2ab20
DH
578 const unsigned long end_pfn = start_pfn + nr_pages;
579 unsigned long pfn;
b2c2ab20
DH
580
581 /*
23baf831 582 * Online the pages in MAX_ORDER aligned chunks. The callback might
aac65321
DH
583 * decide to not expose all pages to the buddy (e.g., expose them
584 * later). We account all pages as being online and belonging to this
585 * zone ("present").
a08a2ae3
OS
586 * When using memmap_on_memory, the range might not be aligned to
587 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
588 * this and the first chunk to online will be pageblock_nr_pages.
b2c2ab20 589 */
a08a2ae3 590 for (pfn = start_pfn; pfn < end_pfn;) {
59f876fb
KS
591 int order;
592
		/*
		 * Free to online pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_ORDER-aligned, so set order to MAX_ORDER in that case.
		 */
599 if (pfn)
600 order = min_t(int, MAX_ORDER, __ffs(pfn));
601 else
602 order = MAX_ORDER;
a08a2ae3
OS
603
604 (*online_page_callback)(pfn_to_page(pfn), order);
605 pfn += (1UL << order);
606 }
2d070eab 607
b2c2ab20
DH
608 /* mark all involved sections as online */
609 online_mem_sections(start_pfn, end_pfn);
75884fb1
KH
610}
611
d9713679
LJ
612/* check which state of node_states will be changed when online memory */
613static void node_states_check_changes_online(unsigned long nr_pages,
614 struct zone *zone, struct memory_notify *arg)
615{
616 int nid = zone_to_nid(zone);
d9713679 617
98fa15f3
AK
618 arg->status_change_nid = NUMA_NO_NODE;
619 arg->status_change_nid_normal = NUMA_NO_NODE;
d9713679 620
8efe33f4
OS
621 if (!node_state(nid, N_MEMORY))
622 arg->status_change_nid = nid;
623 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
d9713679 624 arg->status_change_nid_normal = nid;
d9713679
LJ
625}
626
627static void node_states_set_node(int node, struct memory_notify *arg)
628{
629 if (arg->status_change_nid_normal >= 0)
630 node_set_state(node, N_NORMAL_MEMORY);
631
83d83612
OS
632 if (arg->status_change_nid >= 0)
633 node_set_state(node, N_MEMORY);
d9713679
LJ
634}
635
f1dd2cd1
MH
636static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
637 unsigned long nr_pages)
638{
639 unsigned long old_end_pfn = zone_end_pfn(zone);
640
641 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
642 zone->zone_start_pfn = start_pfn;
643
644 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
645}
646
647static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
648 unsigned long nr_pages)
649{
650 unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
651
652 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
653 pgdat->node_start_pfn = start_pfn;
654
655 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
f1dd2cd1 656
3fccb74c 657}
1f90a347 658
ed7802dd 659#ifdef CONFIG_ZONE_DEVICE
1f90a347
DW
660static void section_taint_zone_device(unsigned long pfn)
661{
662 struct mem_section *ms = __pfn_to_section(pfn);
663
664 ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
665}
ed7802dd
MS
666#else
667static inline void section_taint_zone_device(unsigned long pfn)
668{
669}
670#endif
1f90a347 671
3fccb74c
DH
672/*
673 * Associate the pfn range with the given zone, initializing the memmaps
674 * and resizing the pgdat/zone data to span the added pages. After this
675 * call, all affected pages are PG_reserved.
d882c006
DH
676 *
677 * All aligned pageblocks are initialized to the specified migratetype
678 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
679 * zone stats (e.g., nr_isolate_pageblock) are touched.
3fccb74c 680 */
a99583e7 681void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
d882c006
DH
682 unsigned long nr_pages,
683 struct vmem_altmap *altmap, int migratetype)
f1dd2cd1
MH
684{
685 struct pglist_data *pgdat = zone->zone_pgdat;
686 int nid = pgdat->node_id;
df429ac0 687
f1dd2cd1
MH
688 clear_zone_contiguous(zone);
689
fa004ab7
WY
690 if (zone_is_empty(zone))
691 init_currently_empty_zone(zone, start_pfn, nr_pages);
f1dd2cd1 692 resize_zone_range(zone, start_pfn, nr_pages);
f1dd2cd1 693 resize_pgdat_range(pgdat, start_pfn, nr_pages);
f1dd2cd1 694
1f90a347
DW
695 /*
696 * Subsection population requires care in pfn_to_online_page().
697 * Set the taint to enable the slow path detection of
698 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
699 * section.
700 */
701 if (zone_is_zone_device(zone)) {
702 if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
703 section_taint_zone_device(start_pfn);
704 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
705 section_taint_zone_device(start_pfn + nr_pages);
706 }
707
f1dd2cd1
MH
708 /*
	 * TODO: now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice, but set_pfnblock_flags_mask()
	 * expects that the zone spans the pfn range. All the pages in the range
	 * are reserved, so nobody should be touching them, so we should be safe.
713 */
ab28cb6e 714 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
d882c006 715 MEMINIT_HOTPLUG, altmap, migratetype);
f1dd2cd1
MH
716
717 set_zone_contiguous(zone);
718}
719
e83a437f
DH
720struct auto_movable_stats {
721 unsigned long kernel_early_pages;
722 unsigned long movable_pages;
723};
724
725static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
726 struct zone *zone)
727{
728 if (zone_idx(zone) == ZONE_MOVABLE) {
729 stats->movable_pages += zone->present_pages;
730 } else {
731 stats->kernel_early_pages += zone->present_early_pages;
732#ifdef CONFIG_CMA
733 /*
734 * CMA pages (never on hotplugged memory) behave like
735 * ZONE_MOVABLE.
736 */
737 stats->movable_pages += zone->cma_pages;
738 stats->kernel_early_pages -= zone->cma_pages;
739#endif /* CONFIG_CMA */
740 }
741}
3fcebf90
DH
742struct auto_movable_group_stats {
743 unsigned long movable_pages;
744 unsigned long req_kernel_early_pages;
745};
e83a437f 746
3fcebf90
DH
747static int auto_movable_stats_account_group(struct memory_group *group,
748 void *arg)
749{
750 const int ratio = READ_ONCE(auto_movable_ratio);
751 struct auto_movable_group_stats *stats = arg;
752 long pages;
753
754 /*
755 * We don't support modifying the config while the auto-movable online
756 * policy is already enabled. Just avoid the division by zero below.
757 */
758 if (!ratio)
759 return 0;
760
761 /*
762 * Calculate how many early kernel pages this group requires to
763 * satisfy the configured zone ratio.
764 */
765 pages = group->present_movable_pages * 100 / ratio;
766 pages -= group->present_kernel_pages;
767
768 if (pages > 0)
769 stats->req_kernel_early_pages += pages;
770 stats->movable_pages += group->present_movable_pages;
771 return 0;
772}
773
774static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
775 unsigned long nr_pages)
e83a437f 776{
e83a437f 777 unsigned long kernel_early_pages, movable_pages;
3fcebf90
DH
778 struct auto_movable_group_stats group_stats = {};
779 struct auto_movable_stats stats = {};
e83a437f
DH
780 pg_data_t *pgdat = NODE_DATA(nid);
781 struct zone *zone;
782 int i;
783
784 /* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
785 if (nid == NUMA_NO_NODE) {
786 /* TODO: cache values */
787 for_each_populated_zone(zone)
788 auto_movable_stats_account_zone(&stats, zone);
789 } else {
790 for (i = 0; i < MAX_NR_ZONES; i++) {
791 zone = pgdat->node_zones + i;
792 if (populated_zone(zone))
793 auto_movable_stats_account_zone(&stats, zone);
794 }
795 }
796
797 kernel_early_pages = stats.kernel_early_pages;
798 movable_pages = stats.movable_pages;
799
3fcebf90
DH
800 /*
801 * Kernel memory inside dynamic memory group allows for more MOVABLE
802 * memory within the same group. Remove the effect of all but the
803 * current group from the stats.
804 */
805 walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
806 group, &group_stats);
807 if (kernel_early_pages <= group_stats.req_kernel_early_pages)
808 return false;
809 kernel_early_pages -= group_stats.req_kernel_early_pages;
810 movable_pages -= group_stats.movable_pages;
811
812 if (group && group->is_dynamic)
813 kernel_early_pages += group->present_kernel_pages;
814
e83a437f
DH
815 /*
816 * Test if we could online the given number of pages to ZONE_MOVABLE
817 * and still stay in the configured ratio.
818 */
819 movable_pages += nr_pages;
820 return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
821}
822
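/*
 * Worked example (illustrative): with the default auto_movable_ratio of 301
 * and 4 GiB of early KERNEL memory, onlining to ZONE_MOVABLE is allowed as
 * long as MOVABLE stays below roughly 12 GiB (4 GiB * 301 / 100), i.e.
 * about a 3:1 MOVABLE:KERNEL split.
 */
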
c246a213
MH
823/*
824 * Returns a default kernel memory zone for the given pfn range.
825 * If no kernel zone covers this pfn range it will automatically go
826 * to the ZONE_NORMAL.
827 */
c6f03e29 828static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
c246a213
MH
829 unsigned long nr_pages)
830{
831 struct pglist_data *pgdat = NODE_DATA(nid);
832 int zid;
833
d6aad201 834 for (zid = 0; zid < ZONE_NORMAL; zid++) {
c246a213
MH
835 struct zone *zone = &pgdat->node_zones[zid];
836
837 if (zone_intersects(zone, start_pfn, nr_pages))
838 return zone;
839 }
840
841 return &pgdat->node_zones[ZONE_NORMAL];
842}
843
e83a437f
DH
844/*
845 * Determine to which zone to online memory dynamically based on user
846 * configuration and system stats. We care about the following ratio:
847 *
848 * MOVABLE : KERNEL
849 *
850 * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
851 * one of the kernel zones. CMA pages inside one of the kernel zones really
852 * behaves like ZONE_MOVABLE, so we treat them accordingly.
853 *
854 * We don't allow for hotplugged memory in a KERNEL zone to increase the
855 * amount of MOVABLE memory we can have, so we end up with:
856 *
857 * MOVABLE : KERNEL_EARLY
858 *
 * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
 * boot. We base our calculation on KERNEL_EARLY internally, because:
861 *
862 * a) Hotplugged memory in one of the kernel zones can sometimes still get
863 * hotunplugged, especially when hot(un)plugging individual memory blocks.
864 * There is no coordination across memory devices, therefore "automatic"
865 * hotunplugging, as implemented in hypervisors, could result in zone
866 * imbalances.
867 * b) Early/boot memory in one of the kernel zones can usually not get
868 * hotunplugged again (e.g., no firmware interface to unplug, fragmented
869 * with unmovable allocations). While there are corner cases where it might
870 * still work, it is barely relevant in practice.
871 *
3fcebf90
DH
872 * Exceptions are dynamic memory groups, which allow for more MOVABLE
873 * memory within the same memory group -- because in that case, there is
874 * coordination within the single memory device managed by a single driver.
875 *
e83a437f
DH
876 * We rely on "present pages" instead of "managed pages", as the latter is
877 * highly unreliable and dynamic in virtualized environments, and does not
878 * consider boot time allocations. For example, memory ballooning adjusts the
879 * managed pages when inflating/deflating the balloon, and balloon compaction
880 * can even migrate inflated pages between zones.
881 *
882 * Using "present pages" is better but some things to keep in mind are:
883 *
884 * a) Some memblock allocations, such as for the crashkernel area, are
885 * effectively unused by the kernel, yet they account to "present pages".
886 * Fortunately, these allocations are comparatively small in relevant setups
887 * (e.g., fraction of system memory).
 * b) Some hotplugged memory blocks in virtualized environments, especially
889 * hotplugged by virtio-mem, look like they are completely present, however,
890 * only parts of the memory block are actually currently usable.
891 * "present pages" is an upper limit that can get reached at runtime. As
892 * we base our calculations on KERNEL_EARLY, this is not an issue.
893 */
445fcf7c
DH
894static struct zone *auto_movable_zone_for_pfn(int nid,
895 struct memory_group *group,
896 unsigned long pfn,
e83a437f
DH
897 unsigned long nr_pages)
898{
445fcf7c
DH
899 unsigned long online_pages = 0, max_pages, end_pfn;
900 struct page *page;
901
e83a437f
DH
902 if (!auto_movable_ratio)
903 goto kernel_zone;
904
445fcf7c
DH
905 if (group && !group->is_dynamic) {
906 max_pages = group->s.max_pages;
907 online_pages = group->present_movable_pages;
908
909 /* If anything is !MOVABLE online the rest !MOVABLE. */
910 if (group->present_kernel_pages)
911 goto kernel_zone;
912 } else if (!group || group->d.unit_pages == nr_pages) {
913 max_pages = nr_pages;
914 } else {
915 max_pages = group->d.unit_pages;
916 /*
917 * Take a look at all online sections in the current unit.
918 * We can safely assume that all pages within a section belong
919 * to the same zone, because dynamic memory groups only deal
920 * with hotplugged memory.
921 */
922 pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
923 end_pfn = pfn + group->d.unit_pages;
924 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
925 page = pfn_to_online_page(pfn);
926 if (!page)
927 continue;
928 /* If anything is !MOVABLE online the rest !MOVABLE. */
07252dfe 929 if (!is_zone_movable_page(page))
445fcf7c
DH
930 goto kernel_zone;
931 online_pages += PAGES_PER_SECTION;
932 }
933 }
934
935 /*
936 * Online MOVABLE if we could *currently* online all remaining parts
937 * MOVABLE. We expect to (add+) online them immediately next, so if
938 * nobody interferes, all will be MOVABLE if possible.
939 */
940 nr_pages = max_pages - online_pages;
3fcebf90 941 if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
e83a437f
DH
942 goto kernel_zone;
943
944#ifdef CONFIG_NUMA
945 if (auto_movable_numa_aware &&
3fcebf90 946 !auto_movable_can_online_movable(nid, group, nr_pages))
e83a437f
DH
947 goto kernel_zone;
948#endif /* CONFIG_NUMA */
949
950 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
951kernel_zone:
952 return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
953}
954
c6f03e29
MH
955static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
956 unsigned long nr_pages)
e5e68930 957{
c6f03e29
MH
958 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
959 nr_pages);
960 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
961 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
962 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);
e5e68930
MH
963
964 /*
c6f03e29
MH
965 * We inherit the existing zone in a simple case where zones do not
966 * overlap in the given range
e5e68930 967 */
c6f03e29
MH
968 if (in_kernel ^ in_movable)
969 return (in_kernel) ? kernel_zone : movable_zone;
9f123ab5 970
c6f03e29
MH
971 /*
972 * If the range doesn't belong to any zone or two zones overlap in the
973 * given range then we use movable zone only if movable_node is
974 * enabled because we always online to a kernel zone by default.
975 */
976 return movable_node_enabled ? movable_zone : kernel_zone;
9f123ab5
MH
977}
978
7cf209ba 979struct zone *zone_for_pfn_range(int online_type, int nid,
445fcf7c 980 struct memory_group *group, unsigned long start_pfn,
e5e68930 981 unsigned long nr_pages)
f1dd2cd1 982{
c6f03e29
MH
983 if (online_type == MMOP_ONLINE_KERNEL)
984 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);
f1dd2cd1 985
c6f03e29
MH
986 if (online_type == MMOP_ONLINE_MOVABLE)
987 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
df429ac0 988
e83a437f 989 if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
445fcf7c 990 return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);
e83a437f 991
c6f03e29 992 return default_zone_for_pfn(nid, start_pfn, nr_pages);
e5e68930
MH
993}
994
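/*
 * Example (illustrative): the zone chosen here is what user space selects
 * when onlining a memory block via sysfs, e.g.:
 *
 *   echo online_movable > /sys/devices/system/memory/memoryX/state
 *
 * maps to MMOP_ONLINE_MOVABLE and thus ZONE_MOVABLE, while plain "online"
 * (MMOP_ONLINE) follows the configured online_policy.
 */
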
a08a2ae3
OS
995/*
996 * This function should only be called by memory_block_{online,offline},
997 * and {online,offline}_pages.
998 */
836809ec
DH
999void adjust_present_page_count(struct page *page, struct memory_group *group,
1000 long nr_pages)
f9901144 1001{
4b097002 1002 struct zone *zone = page_zone(page);
836809ec 1003 const bool movable = zone_idx(zone) == ZONE_MOVABLE;
4b097002
DH
1004
1005 /*
	 * We only support onlining/offlining/adding/removing of complete
	 * memory blocks; therefore, all pages are either early or hotplugged.
1008 */
1009 if (early_section(__pfn_to_section(page_to_pfn(page))))
1010 zone->present_early_pages += nr_pages;
f9901144 1011 zone->present_pages += nr_pages;
f9901144 1012 zone->zone_pgdat->node_present_pages += nr_pages;
836809ec
DH
1013
1014 if (group && movable)
1015 group->present_movable_pages += nr_pages;
1016 else if (group && !movable)
1017 group->present_kernel_pages += nr_pages;
f9901144
DH
1018}
1019
a08a2ae3
OS
1020int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
1021 struct zone *zone)
1022{
1023 unsigned long end_pfn = pfn + nr_pages;
66361095 1024 int ret, i;
a08a2ae3
OS
1025
1026 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1027 if (ret)
1028 return ret;
1029
1030 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1031
66361095
MS
1032 for (i = 0; i < nr_pages; i++)
1033 SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));
1034
a08a2ae3
OS
1035 /*
1036 * It might be that the vmemmap_pages fully span sections. If that is
1037 * the case, mark those sections online here as otherwise they will be
1038 * left offline.
1039 */
1040 if (nr_pages >= PAGES_PER_SECTION)
1041 online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1042
1043 return ret;
1044}
1045
1046void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
1047{
1048 unsigned long end_pfn = pfn + nr_pages;
1049
1050 /*
1051 * It might be that the vmemmap_pages fully span sections. If that is
1052 * the case, mark those sections offline here as otherwise they will be
1053 * left online.
1054 */
1055 if (nr_pages >= PAGES_PER_SECTION)
1056 offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1057
1058 /*
1059 * The pages associated with this vmemmap have been offlined, so
1060 * we can reset its state here.
1061 */
1062 remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1063 kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1064}
1065
836809ec
DH
1066int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
1067 struct zone *zone, struct memory_group *group)
75884fb1 1068{
aa47228a 1069 unsigned long flags;
6811378e 1070 int need_zonelists_rebuild = 0;
a08a2ae3 1071 const int nid = zone_to_nid(zone);
7b78d335
YG
1072 int ret;
1073 struct memory_notify arg;
d0dc12e8 1074
	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial part
	 * of the physical memory space for vmemmaps. That space is pageblock
	 * aligned.
	 */
ee0913c4 1082 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
dd8e2f23 1083 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
4986fac1
DH
1084 return -EINVAL;
1085
381eab4a
DH
1086 mem_hotplug_begin();
1087
f1dd2cd1 1088 /* associate pfn range with the zone */
b30c5927 1089 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
f1dd2cd1 1090
7b78d335
YG
1091 arg.start_pfn = pfn;
1092 arg.nr_pages = nr_pages;
d9713679 1093 node_states_check_changes_online(nr_pages, zone, &arg);
7b78d335 1094
7b78d335
YG
1095 ret = memory_notify(MEM_GOING_ONLINE, &arg);
1096 ret = notifier_to_errno(ret);
e33e33b4
CY
1097 if (ret)
1098 goto failed_addition;
1099
b30c5927
DH
1100 /*
1101 * Fixup the number of isolated pageblocks before marking the sections
1102 * onlining, such that undo_isolate_page_range() works correctly.
1103 */
1104 spin_lock_irqsave(&zone->lock, flags);
1105 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
1106 spin_unlock_irqrestore(&zone->lock, flags);
1107
6811378e
YG
1108 /*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
1112 */
6dcd73d7 1113 if (!populated_zone(zone)) {
6811378e 1114 need_zonelists_rebuild = 1;
72675e13 1115 setup_zone_pageset(zone);
6dcd73d7 1116 }
6811378e 1117
aac65321 1118 online_pages_range(pfn, nr_pages);
836809ec 1119 adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
aa47228a 1120
b30c5927
DH
1121 node_states_set_node(nid, &arg);
1122 if (need_zonelists_rebuild)
1123 build_all_zonelists(NULL);
b30c5927
DH
1124
1125 /* Basic onlining is complete, allow allocation of onlined pages. */
1126 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1127
93146d98 1128 /*
b86c5fc4
DH
1129 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
1130 * the tail of the freelist when undoing isolation). Shuffle the whole
1131 * zone to make sure the just onlined pages are properly distributed
1132 * across the whole freelist - to create an initial shuffle.
93146d98 1133 */
e900a918
DW
1134 shuffle_zone(zone);
1135
b92ca18e 1136 /* reinitialise watermarks and update pcp limits */
1b79acc9
KM
1137 init_per_zone_wmark_min();
1138
ca9a46f8
DH
1139 kswapd_run(nid);
1140 kcompactd_run(nid);
61b13993 1141
2d1d43f6 1142 writeback_set_ratelimit();
7b78d335 1143
ca9a46f8 1144 memory_notify(MEM_ONLINE, &arg);
381eab4a 1145 mem_hotplug_done();
30467e0b 1146 return 0;
e33e33b4
CY
1147
1148failed_addition:
1149 pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
1150 (unsigned long long) pfn << PAGE_SHIFT,
1151 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1152 memory_notify(MEM_CANCEL_ONLINE, &arg);
feee6b29 1153 remove_pfn_range_from_zone(zone, pfn, nr_pages);
381eab4a 1154 mem_hotplug_done();
e33e33b4 1155 return ret;
3947be19 1156}
bc02af93 1157
e1319331 1158/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
09f49dca 1159static pg_data_t __ref *hotadd_init_pgdat(int nid)
9af3c2de
YG
1160{
1161 struct pglist_data *pgdat;
9af3c2de 1162
	/*
	 * NODE_DATA is preallocated (free_area_init) but its internal
	 * state is not allocated completely. Add missing pieces.
	 * Completely offline nodes stay around and they just need
	 * reinitialization.
	 */
70b5b46a 1169 pgdat = NODE_DATA(nid);
03e85f9d 1170
9af3c2de 1171 /* init node's zones as empty zones, we don't have any present pages.*/
70b5b46a 1172 free_area_init_core_hotplug(pgdat);
9af3c2de 1173
	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build it here.
	 */
72675e13 1178 build_all_zonelists(pgdat);
959ecc48 1179
9af3c2de
YG
1180 return pgdat;
1181}
1182
ba2d2666
MG
1183/*
1184 * __try_online_node - online a node if offlined
e8b098fc 1185 * @nid: the node ID
b9ff0360 1186 * @set_node_online: Whether we want to online the node
cf23422b 1187 * called by cpu_up() to online a node without onlined memory.
b9ff0360
OS
1188 *
1189 * Returns:
1190 * 1 -> a new node has been allocated
1191 * 0 -> the node is already online
1192 * -ENOMEM -> the node could not be allocated
cf23422b 1193 */
c68ab18c 1194static int __try_online_node(int nid, bool set_node_online)
cf23422b 1195{
b9ff0360
OS
1196 pg_data_t *pgdat;
1197 int ret = 1;
cf23422b 1198
01b0f197
TK
1199 if (node_online(nid))
1200 return 0;
1201
09f49dca 1202 pgdat = hotadd_init_pgdat(nid);
7553e8f2 1203 if (!pgdat) {
01b0f197 1204 pr_err("Cannot online node %d due to NULL pgdat\n", nid);
cf23422b 1205 ret = -ENOMEM;
1206 goto out;
1207 }
b9ff0360
OS
1208
1209 if (set_node_online) {
1210 node_set_online(nid);
1211 ret = register_one_node(nid);
1212 BUG_ON(ret);
1213 }
cf23422b 1214out:
b9ff0360
OS
1215 return ret;
1216}
1217
1218/*
1219 * Users of this function always want to online/register the node
1220 */
1221int try_online_node(int nid)
1222{
1223 int ret;
1224
1225 mem_hotplug_begin();
c68ab18c 1226 ret = __try_online_node(nid, true);
bfc8c901 1227 mem_hotplug_done();
cf23422b 1228 return ret;
1229}
1230
27356f54
TK
1231static int check_hotplug_memory_range(u64 start, u64 size)
1232{
ba325585 1233 /* memory range must be block size aligned */
cec3ebd0
DH
1234 if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
1235 !IS_ALIGNED(size, memory_block_size_bytes())) {
ba325585 1236 pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
cec3ebd0 1237 memory_block_size_bytes(), start, size);
27356f54
TK
1238 return -EINVAL;
1239 }
1240
1241 return 0;
1242}
1243
31bc3858
VK
1244static int online_memory_block(struct memory_block *mem, void *arg)
1245{
1adf8b46 1246 mem->online_type = mhp_default_online_type;
dc18d706 1247 return device_online(&mem->dev);
31bc3858
VK
1248}
1249
a08a2ae3
OS
1250bool mhp_supports_memmap_on_memory(unsigned long size)
1251{
1252 unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
1253 unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
1254 unsigned long remaining_size = size - vmemmap_size;
1255
1256 /*
1257 * Besides having arch support and the feature enabled at runtime, we
1258 * need a few more assumptions to hold true:
1259 *
	 * a) We span a single memory block: memory onlining/offlining happens
1261 * in memory block granularity. We don't want the vmemmap of online
1262 * memory blocks to reside on offline memory blocks. In the future,
1263 * we might want to support variable-sized memory blocks to make the
1264 * feature more versatile.
1265 *
1266 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
1267 * to populate memory from the altmap for unrelated parts (i.e.,
1268 * other memory blocks)
1269 *
1270 * c) The vmemmap pages (and thereby the pages that will be exposed to
1271 * the buddy) have to cover full pageblocks: memory onlining/offlining
1272 * code requires applicable ranges to be page-aligned, for example, to
1273 * set the migratetypes properly.
1274 *
1275 * TODO: Although we have a check here to make sure that vmemmap pages
1276 * fully populate a PMD, it is not the right place to check for
1277 * this. A much better solution involves improving vmemmap code
1278 * to fallback to base pages when trying to populate vmemmap using
1279 * altmap as an alternative source of memory, and we do not exactly
1280 * populate a single PMD.
1281 */
6e02c46b 1282 return mhp_memmap_on_memory() &&
a08a2ae3
OS
1283 size == memory_block_size_bytes() &&
1284 IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
1285 IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
1286}
1287
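/*
 * Usage sketch (illustrative, names made up): a driver adding memory in
 * memory-block-sized chunks might opt in to self-hosted memmaps like this:
 *
 *	mhp_t mhp_flags = MHP_NONE;
 *
 *	if (mhp_supports_memmap_on_memory(memory_block_size_bytes()))
 *		mhp_flags |= MHP_MEMMAP_ON_MEMORY;
 *	rc = add_memory_driver_managed(nid, start, memory_block_size_bytes(),
 *				       "System RAM (mydriver)", mhp_flags);
 */
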
8df1d0e4
DH
1288/*
1289 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
1290 * and online/offline operations (triggered e.g. by sysfs).
1291 *
1292 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
1293 */
b6117199 1294int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
bc02af93 1295{
d15dfd31 1296 struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
32befe9e 1297 enum memblock_flags memblock_flags = MEMBLOCK_NONE;
a08a2ae3 1298 struct vmem_altmap mhp_altmap = {};
028fc57a 1299 struct memory_group *group = NULL;
62cedb9f 1300 u64 start, size;
b9ff0360 1301 bool new_node = false;
bc02af93
YG
1302 int ret;
1303
62cedb9f
DV
1304 start = res->start;
1305 size = resource_size(res);
1306
27356f54
TK
1307 ret = check_hotplug_memory_range(start, size);
1308 if (ret)
1309 return ret;
1310
028fc57a
DH
1311 if (mhp_flags & MHP_NID_IS_MGID) {
1312 group = memory_group_find_by_id(nid);
1313 if (!group)
1314 return -EINVAL;
1315 nid = group->nid;
1316 }
1317
fa6d9ec7
VV
1318 if (!node_possible(nid)) {
1319 WARN(1, "node %d was absent from the node_possible_map\n", nid);
1320 return -EINVAL;
1321 }
1322
bfc8c901 1323 mem_hotplug_begin();
ac13c462 1324
53d38316 1325 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
32befe9e
DH
1326 if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
1327 memblock_flags = MEMBLOCK_DRIVER_MANAGED;
1328 ret = memblock_add_node(start, size, nid, memblock_flags);
53d38316
DH
1329 if (ret)
1330 goto error_mem_hotplug_end;
1331 }
7f36e3e5 1332
c68ab18c 1333 ret = __try_online_node(nid, false);
b9ff0360
OS
1334 if (ret < 0)
1335 goto error;
1336 new_node = ret;
9af3c2de 1337
a08a2ae3
OS
1338 /*
1339 * Self hosted memmap array
1340 */
1341 if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
1342 if (!mhp_supports_memmap_on_memory(size)) {
1343 ret = -EINVAL;
1344 goto error;
1345 }
1346 mhp_altmap.free = PHYS_PFN(size);
1347 mhp_altmap.base_pfn = PHYS_PFN(start);
1348 params.altmap = &mhp_altmap;
1349 }
1350
bc02af93 1351 /* call arch's memory hotadd */
f5637d3b 1352 ret = arch_add_memory(nid, start, size, &params);
9af3c2de
YG
1353 if (ret < 0)
1354 goto error;
1355
db051a0d 1356 /* create memory block devices after memory was added */
028fc57a
DH
1357 ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
1358 group);
db051a0d 1359 if (ret) {
65a2aa5f 1360 arch_remove_memory(start, size, NULL);
db051a0d
DH
1361 goto error;
1362 }
1363
a1e565aa 1364 if (new_node) {
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so catch it with BUG_ON(), reluctantly.
		 * We online the node here. We can't roll back from here.
		 */
d5b6f6a3
OS
1370 node_set_online(nid);
1371 ret = __register_one_node(nid);
0fc44159
YG
1372 BUG_ON(ret);
1373 }
1374
cc651559
DH
1375 register_memory_blocks_under_node(nid, PFN_DOWN(start),
1376 PFN_UP(start + size - 1),
1377 MEMINIT_HOTPLUG);
d5b6f6a3 1378
d96ae530 1379 /* create new memmap entry */
7b7b2721
DH
1380 if (!strcmp(res->name, "System RAM"))
1381 firmware_map_add_hotplug(start, start + size, "System RAM");
d96ae530 1382
381eab4a
DH
1383 /* device_online() will take the lock when calling online_pages() */
1384 mem_hotplug_done();
1385
9ca6551e
DH
1386 /*
1387 * In case we're allowed to merge the resource, flag it and trigger
1388 * merging now that adding succeeded.
1389 */
26011267 1390 if (mhp_flags & MHP_MERGE_RESOURCE)
9ca6551e
DH
1391 merge_system_ram_resource(res);
1392
31bc3858 1393 /* online pages if requested */
1adf8b46 1394 if (mhp_default_online_type != MMOP_OFFLINE)
fbcf73ce 1395 walk_memory_blocks(start, size, NULL, online_memory_block);
31bc3858 1396
381eab4a 1397 return ret;
9af3c2de 1398error:
52219aea
DH
1399 if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
1400 memblock_remove(start, size);
53d38316 1401error_mem_hotplug_end:
bfc8c901 1402 mem_hotplug_done();
bc02af93
YG
1403 return ret;
1404}
62cedb9f 1405
8df1d0e4 1406/* requires device_hotplug_lock, see add_memory_resource() */
b6117199 1407int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
62cedb9f
DV
1408{
1409 struct resource *res;
1410 int ret;
1411
7b7b2721 1412 res = register_memory_resource(start, size, "System RAM");
6f754ba4
VK
1413 if (IS_ERR(res))
1414 return PTR_ERR(res);
62cedb9f 1415
b6117199 1416 ret = add_memory_resource(nid, res, mhp_flags);
62cedb9f
DV
1417 if (ret < 0)
1418 release_memory_resource(res);
1419 return ret;
1420}
8df1d0e4 1421
b6117199 1422int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
8df1d0e4
DH
1423{
1424 int rc;
1425
1426 lock_device_hotplug();
b6117199 1427 rc = __add_memory(nid, start, size, mhp_flags);
8df1d0e4
DH
1428 unlock_device_hotplug();
1429
1430 return rc;
1431}
bc02af93 1432EXPORT_SYMBOL_GPL(add_memory);
0c0e6195 1433
7b7b2721
DH
1434/*
1435 * Add special, driver-managed memory to the system as system RAM. Such
1436 * memory is not exposed via the raw firmware-provided memmap as system
1437 * RAM, instead, it is detected and added by a driver - during cold boot,
1438 * after a reboot, and after kexec.
1439 *
1440 * Reasons why this memory should not be used for the initial memmap of a
1441 * kexec kernel or for placing kexec images:
1442 * - The booting kernel is in charge of determining how this memory will be
1443 * used (e.g., use persistent memory as system RAM)
1444 * - Coordination with a hypervisor is required before this memory
1445 * can be used (e.g., inaccessible parts).
1446 *
1447 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
1448 * memory map") are created. Also, the created memory resource is flagged
7cf603d1 1449 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
7b7b2721
DH
1450 * this memory as well (esp., not place kexec images onto it).
1451 *
1452 * The resource_name (visible via /proc/iomem) has to have the format
1453 * "System RAM ($DRIVER)".
1454 */
1455int add_memory_driver_managed(int nid, u64 start, u64 size,
b6117199 1456 const char *resource_name, mhp_t mhp_flags)
7b7b2721
DH
1457{
1458 struct resource *res;
1459 int rc;
1460
1461 if (!resource_name ||
1462 strstr(resource_name, "System RAM (") != resource_name ||
1463 resource_name[strlen(resource_name) - 1] != ')')
1464 return -EINVAL;
1465
1466 lock_device_hotplug();
1467
1468 res = register_memory_resource(start, size, resource_name);
1469 if (IS_ERR(res)) {
1470 rc = PTR_ERR(res);
1471 goto out_unlock;
1472 }
1473
b6117199 1474 rc = add_memory_resource(nid, res, mhp_flags);
7b7b2721
DH
1475 if (rc < 0)
1476 release_memory_resource(res);
1477
1478out_unlock:
1479 unlock_device_hotplug();
1480 return rc;
1481}
1482EXPORT_SYMBOL_GPL(add_memory_driver_managed);
1483
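/*
 * Example (illustrative): valid resource names follow "System RAM ($DRIVER)",
 * e.g. "System RAM (kmem)" or "System RAM (virtio_mem)"; anything else, such
 * as a bare "System RAM", is rejected with -EINVAL by the check above.
 */
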
bca3feaa
AK
1484/*
1485 * Platforms should define arch_get_mappable_range() that provides
1486 * maximum possible addressable physical memory range for which the
1487 * linear mapping could be created. The platform returned address
1488 * range must adhere to these following semantics.
1489 *
1490 * - range.start <= range.end
1491 * - Range includes both end points [range.start..range.end]
1492 *
1493 * There is also a fallback definition provided here, allowing the
1494 * entire possible physical address range in case any platform does
1495 * not define arch_get_mappable_range().
1496 */
1497struct range __weak arch_get_mappable_range(void)
1498{
1499 struct range mhp_range = {
1500 .start = 0UL,
1501 .end = -1ULL,
1502 };
1503 return mhp_range;
1504}
1505
1506struct range mhp_get_pluggable_range(bool need_mapping)
1507{
1508 const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
1509 struct range mhp_range;
1510
1511 if (need_mapping) {
1512 mhp_range = arch_get_mappable_range();
1513 if (mhp_range.start > max_phys) {
1514 mhp_range.start = 0;
1515 mhp_range.end = 0;
1516 }
1517 mhp_range.end = min_t(u64, mhp_range.end, max_phys);
1518 } else {
1519 mhp_range.start = 0;
1520 mhp_range.end = max_phys;
1521 }
1522 return mhp_range;
1523}
1524EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);
1525
1526bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
1527{
1528 struct range mhp_range = mhp_get_pluggable_range(need_mapping);
1529 u64 end = start + size;
1530
1531 if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
1532 return true;
1533
1534 pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
1535 start, end, mhp_range.start, mhp_range.end);
1536 return false;
1537}
1538
0c0e6195 1539#ifdef CONFIG_MEMORY_HOTREMOVE
0c0e6195 1540/*
0efadf48 1541 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
aa218795
DH
1542 * non-lru movable pages and hugepages). Will skip over most unmovable
1543 * pages (esp., pages that can be skipped when offlining), but bail out on
1544 * definitely unmovable pages.
1545 *
1546 * Returns:
1547 * 0 in case a movable page is found and movable_pfn was updated.
1548 * -ENOENT in case no movable page was found.
1549 * -EBUSY in case a definitely unmovable page was found.
0c0e6195 1550 */
aa218795
DH
1551static int scan_movable_pages(unsigned long start, unsigned long end,
1552 unsigned long *movable_pfn)
0c0e6195
KH
1553{
1554 unsigned long pfn;
eeb0efd0 1555
0c0e6195 1556 for (pfn = start; pfn < end; pfn++) {
eeb0efd0
OS
1557 struct page *page, *head;
1558 unsigned long skip;
1559
1560 if (!pfn_valid(pfn))
1561 continue;
1562 page = pfn_to_page(pfn);
1563 if (PageLRU(page))
aa218795 1564 goto found;
eeb0efd0 1565 if (__PageMovable(page))
aa218795
DH
1566 goto found;
1567
1568 /*
1569 * PageOffline() pages that are not marked __PageMovable() and
1570 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
1571 * definitely unmovable. If their reference count would be 0,
1572 * they could at least be skipped when offlining memory.
1573 */
1574 if (PageOffline(page) && page_count(page))
1575 return -EBUSY;
eeb0efd0
OS
1576
1577 if (!PageHuge(page))
1578 continue;
1579 head = compound_head(page);
8f251a3d
MK
1580 /*
1581 * This test is racy as we hold no reference or lock. The
		 * hugetlb page could have been freed and head is no longer
1583 * a hugetlb page before the following check. In such unlikely
1584 * cases false positives and negatives are possible. Calling
1585 * code must deal with these scenarios.
1586 */
1587 if (HPageMigratable(head))
aa218795 1588 goto found;
d8c6546b 1589 skip = compound_nr(head) - (page - head);
eeb0efd0 1590 pfn += skip - 1;
0c0e6195 1591 }
aa218795
DH
1592 return -ENOENT;
1593found:
1594 *movable_pfn = pfn;
0c0e6195
KH
1595 return 0;
1596}
1597
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        struct page *page, *head;
        LIST_HEAD(source);
        static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                struct folio *folio;
                bool isolated;

                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
                folio = page_folio(page);
                head = &folio->page;

                if (PageHuge(page)) {
                        pfn = page_to_pfn(head) + compound_nr(head) - 1;
                        isolate_hugetlb(folio, &source);
                        continue;
                } else if (PageTransHuge(page))
                        pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;

                /*
                 * HWPoison pages have elevated reference counts so the migration would
                 * fail on them. It also doesn't make any sense to migrate them in the
                 * first place. Still try to unmap such a page in case it is still mapped
                 * (e.g. the current hwpoison implementation doesn't unmap KSM pages but
                 * keeps the unmap as the catch-all safety net).
                 */
                if (PageHWPoison(page)) {
                        if (WARN_ON(folio_test_lru(folio)))
                                folio_isolate_lru(folio);
                        if (folio_mapped(folio))
                                try_to_unmap(folio, TTU_IGNORE_MLOCK);
                        continue;
                }

                if (!get_page_unless_zero(page))
                        continue;
                /*
                 * We can skip free pages. And we can deal with LRU pages and
                 * non-lru movable pages.
                 */
                if (PageLRU(page))
                        isolated = isolate_lru_page(page);
                else
                        isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
                if (isolated) {
                        list_add_tail(&page->lru, &source);
                        if (!__PageMovable(page))
                                inc_node_page_state(page, NR_ISOLATED_ANON +
                                                    page_is_file_lru(page));

                } else {
                        if (__ratelimit(&migrate_rs)) {
                                pr_warn("failed to isolate pfn %lx\n", pfn);
                                dump_page(page, "isolation failed");
                        }
                }
                put_page(page);
        }
        if (!list_empty(&source)) {
                nodemask_t nmask = node_states[N_MEMORY];
                struct migration_target_control mtc = {
                        .nmask = &nmask,
                        .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
                };
                int ret;

                /*
                 * We have checked that the migration range is within a single
                 * zone, so we can use the nid of the first page for all the
                 * others.
                 */
                mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));

                /*
                 * try to allocate from a different node but reuse this node
                 * if there are no other online nodes to be used (e.g. we are
                 * offlining a part of the only existing node)
                 */
                node_clear(mtc.nid, nmask);
                if (nodes_empty(nmask))
                        node_set(mtc.nid, nmask);
                ret = migrate_pages(&source, alloc_migration_target, NULL,
                        (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
                if (ret) {
                        list_for_each_entry(page, &source, lru) {
                                if (__ratelimit(&migrate_rs)) {
                                        pr_warn("migrating pfn %lx failed ret:%d\n",
                                                page_to_pfn(page), ret);
                                        dump_page(page, "migration failure");
                                }
                        }
                        putback_movable_pages(&source);
                }
        }
}

static int __init cmdline_parse_movable_node(char *p)
{
        movable_node_enabled = true;
        return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
                struct zone *zone, struct memory_notify *arg)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long present_pages = 0;
        enum zone_type zt;

        arg->status_change_nid = NUMA_NO_NODE;
        arg->status_change_nid_normal = NUMA_NO_NODE;

        /*
         * Check whether node_states[N_NORMAL_MEMORY] will be changed.
         * If the memory to be offlined is within the range
         * [0..ZONE_NORMAL], and it is the last present memory there,
         * the zones in that range will become empty after the offlining,
         * thus we can determine that we need to clear the node from
         * node_states[N_NORMAL_MEMORY].
         */
        for (zt = 0; zt <= ZONE_NORMAL; zt++)
                present_pages += pgdat->node_zones[zt].present_pages;
        if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages)
                arg->status_change_nid_normal = zone_to_nid(zone);

        /*
         * We have accounted the pages from [0..ZONE_NORMAL); ZONE_HIGHMEM
         * does not apply as we don't support 32bit.
         * Here we count the possible pages from ZONE_MOVABLE.
         * If, after having accounted all the pages, we see that the nr_pages
         * to be offlined is over or equal to the accounted pages,
         * we know that the node will become empty, and so, we can clear
         * it for N_MEMORY as well.
         */
        present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;

        if (nr_pages >= present_pages)
                arg->status_change_nid = zone_to_nid(zone);
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_clear_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid >= 0)
                node_clear_state(node, N_MEMORY);
}

static int count_system_ram_pages_cb(unsigned long start_pfn,
                                     unsigned long nr_pages, void *data)
{
        unsigned long *nr_system_ram_pages = data;

        *nr_system_ram_pages += nr_pages;
        return 0;
}
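
/*
 * offline_pages - take a range of online pages offline
 * @start_pfn: start pfn of the range; must be pageblock aligned
 * @nr_pages: number of pages; start_pfn + nr_pages must be section aligned
 * @zone: the single zone all pages of the range belong to (checked by the
 *	caller)
 * @group: the memory group of the range, used for present-page accounting
 *
 * Isolates the range, migrates or dissolves any remaining movable pages and
 * finally removes the now-free pages from the buddy and marks the memory
 * sections offline. Reached via the memory block device "offline" path.
 */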
int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
                        struct zone *zone, struct memory_group *group)
{
        const unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long pfn, system_ram_pages = 0;
        const int node = zone_to_nid(zone);
        unsigned long flags;
        struct memory_notify arg;
        char *reason;
        int ret;

        /*
         * {on,off}lining is constrained to full memory sections (or more
         * precisely to memory blocks from the user space POV).
         * memmap_on_memory is an exception because it reserves the initial
         * part of the physical memory space for vmemmaps. That space is
         * pageblock aligned.
         */
        if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) ||
                         !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;

        mem_hotplug_begin();

        /*
         * Don't allow offlining memory blocks that contain holes.
         * Consequently, memory blocks with holes can never get onlined
         * via the hotplug path - online_pages() - as hotplugged memory has
         * no holes. This way we, e.g., don't have to worry about marking
         * memory holes PG_reserved, don't need pfn_valid() checks, and can
         * avoid using walk_system_ram_range() later.
         */
        walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages,
                              count_system_ram_pages_cb);
        if (system_ram_pages != nr_pages) {
                ret = -EINVAL;
                reason = "memory holes";
                goto failed_removal;
        }

        /*
         * We only support offlining of memory blocks managed by a single zone,
         * checked by calling code. This is just a sanity check that we might
         * want to remove in the future.
         */
        if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
                         page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
                ret = -EINVAL;
                reason = "multizone range";
                goto failed_removal;
        }

        /*
         * Disable pcplists so that page isolation cannot race with freeing
         * in a way that pages from the isolated pageblock are left on pcplists.
         */
        zone_pcp_disable(zone);
        lru_cache_disable();

        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
                                       MEMORY_OFFLINE | REPORT_FAILURE,
                                       GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
        if (ret) {
                reason = "failure to isolate range";
                goto failed_removal_pcplists_disabled;
        }

        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_offline(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret) {
                reason = "notifier failure";
                goto failed_removal_isolated;
        }

        do {
                pfn = start_pfn;
                do {
                        if (signal_pending(current)) {
                                ret = -EINTR;
                                reason = "signal backoff";
                                goto failed_removal_isolated;
                        }

                        cond_resched();

                        ret = scan_movable_pages(pfn, end_pfn, &pfn);
                        if (!ret) {
                                /*
                                 * TODO: fatal migration failures should bail
                                 * out
                                 */
                                do_migrate_range(pfn, end_pfn);
                        }
                } while (!ret);

                if (ret != -ENOENT) {
                        reason = "unmovable page";
                        goto failed_removal_isolated;
                }

                /*
                 * Dissolve free hugepages in the memory block before actually
                 * offlining, in order to keep hugetlbfs's object counting
                 * consistent.
                 */
                ret = dissolve_free_huge_pages(start_pfn, end_pfn);
                if (ret) {
                        reason = "failure to dissolve huge pages";
                        goto failed_removal_isolated;
                }

                ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE);

        } while (ret);

        /* Mark all sections offline and remove free pages from the buddy. */
        __offline_isolated_pages(start_pfn, end_pfn);
        pr_debug("Offlined Pages %ld\n", nr_pages);

        /*
         * The memory sections are marked offline, and the pageblock flags
         * effectively stale; nobody should be touching them. Fixup the number
         * of isolated pageblocks, memory onlining will properly revert this.
         */
        spin_lock_irqsave(&zone->lock, flags);
        zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
        spin_unlock_irqrestore(&zone->lock, flags);

        lru_cache_enable();
        zone_pcp_enable(zone);

        /* removal success */
        adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
        adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages);

        /* reinitialise watermarks and update pcp limits */
        init_per_zone_wmark_min();

        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                build_all_zonelists(NULL);
        }

        node_states_clear_node(node, &arg);
        if (arg.status_change_nid >= 0) {
                kcompactd_stop(node);
                kswapd_stop(node);
        }

        writeback_set_ratelimit();

        memory_notify(MEM_OFFLINE, &arg);
        remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
        mem_hotplug_done();
        return 0;

failed_removal_isolated:
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        memory_notify(MEM_CANCEL_OFFLINE, &arg);
failed_removal_pcplists_disabled:
        lru_cache_enable();
        zone_pcp_enable(zone);
failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
        mem_hotplug_done();
        return ret;
}

static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
        int *nid = arg;

        *nid = mem->nid;
        if (unlikely(mem->state != MEM_OFFLINE)) {
                phys_addr_t beginpa, endpa;

                beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
                endpa = beginpa + memory_block_size_bytes() - 1;
                pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
                        &beginpa, &endpa);

                return -EBUSY;
        }
        return 0;
}

static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg)
{
        /*
         * If the block has no vmemmap pages, the walk continues with the
         * next block; otherwise the count is returned and the walk stops.
         */
        return mem->nr_vmemmap_pages;
}

static int check_cpu_on_node(int nid)
{
        int cpu;

        for_each_present_cpu(cpu) {
                if (cpu_to_node(cpu) == nid)
                        /*
                         * the cpu on this node isn't removed, and we can't
                         * offline this node.
                         */
                        return -EBUSY;
        }

        return 0;
}

static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg)
{
        int nid = *(int *)arg;

        /*
         * If a memory block belongs to multiple nodes, the stored nid is not
         * reliable. However, such blocks are always online (e.g., cannot get
         * offlined) and, therefore, are still spanned by the node.
         */
        return mem->nid == nid ? -EEXIST : 0;
}

/**
 * try_offline_node - offline a node whose memory and CPUs are all gone
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
        int rc;

        /*
         * If the node still spans pages (especially ZONE_DEVICE), don't
         * offline it. A node spans memory after move_pfn_range_to_zone(),
         * e.g., after the memory block was onlined.
         */
        if (node_spanned_pages(nid))
                return;

        /*
         * Especially offline memory blocks might not be spanned by the
         * node. They will get spanned by the node once they get onlined.
         * However, they link to the node in sysfs and can get onlined later.
         */
        rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb);
        if (rc)
                return;

        if (check_cpu_on_node(nid))
                return;

        /*
         * all memory/cpu of this node are removed, we can offline this
         * node now.
         */
        node_set_offline(nid);
        unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
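
/*
 * Tear down an already offlined memory range: drop the firmware-map entry and
 * the memory block devices, unmap the range from the architecture (honouring
 * a vmemmap altmap if the range was added with MHP_MEMMAP_ON_MEMORY), drop
 * the memblock bookkeeping (if the architecture keeps memblock) and the
 * resource, and finally try to offline the owning node. Runs under
 * device_hotplug_lock, which the callers below take or document.
 */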
static int __ref try_remove_memory(u64 start, u64 size)
{
        struct vmem_altmap mhp_altmap = {};
        struct vmem_altmap *altmap = NULL;
        unsigned long nr_vmemmap_pages;
        int rc = 0, nid = NUMA_NO_NODE;

        BUG_ON(check_hotplug_memory_range(start, size));

        /*
         * All memory blocks must be offlined before removing memory. Check
         * whether all memory blocks in question are offline and return error
         * if this is not the case.
         *
         * While at it, determine the nid. Note that if we'd have mixed nodes,
         * we'd only try to offline the last determined one -- which is good
         * enough for the cases we care about.
         */
        rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb);
        if (rc)
                return rc;

        /*
         * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
         * the same granularity it was added - a single memory block.
         */
        if (mhp_memmap_on_memory()) {
                nr_vmemmap_pages = walk_memory_blocks(start, size, NULL,
                                                      get_nr_vmemmap_pages_cb);
                if (nr_vmemmap_pages) {
                        if (size != memory_block_size_bytes()) {
                                pr_warn("Refuse to remove %#llx - %#llx, wrong granularity\n",
                                        start, start + size);
                                return -EINVAL;
                        }

                        /*
                         * Let remove_pmd_table->free_hugepage_table do the
                         * right thing if we used vmem_altmap when hot-adding
                         * the range.
                         */
                        mhp_altmap.alloc = nr_vmemmap_pages;
                        altmap = &mhp_altmap;
                }
        }

        /* remove memmap entry */
        firmware_map_remove(start, start + size, "System RAM");

        /*
         * Memory block device removal under the device_hotplug_lock is
         * a barrier against racing online attempts.
         */
        remove_memory_block_devices(start, size);

        mem_hotplug_begin();

        arch_remove_memory(start, size, altmap);

        if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {
                memblock_phys_free(start, size);
                memblock_remove(start, size);
        }

        release_mem_region_adjustable(start, size);

        if (nid != NUMA_NO_NODE)
                try_offline_node(nid);

        mem_hotplug_done();
        return 0;
}

/**
 * __remove_memory - Remove memory if every memory block is offline
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __remove_memory(u64 start, u64 size)
{
        /*
         * trigger BUG() if some memory is not offlined prior to calling this
         * function
         */
        if (try_remove_memory(start, size))
                BUG();
}

/*
 * Remove memory if every memory block is offline, otherwise return -EBUSY if
 * some memory is not offline.
 */
int remove_memory(u64 start, u64 size)
{
        int rc;

        lock_device_hotplug();
        rc = try_remove_memory(start, size);
        unlock_device_hotplug();

        return rc;
}
EXPORT_SYMBOL_GPL(remove_memory);
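
/*
 * Illustrative example: a caller that added a memory block earlier and has
 * confirmed it is offline could remove it with the locked wrapper above.
 * "base" is a placeholder for the block's physical start address.
 *
 *	rc = remove_memory(base, memory_block_size_bytes());
 *	if (rc == -EBUSY)
 *		pr_info("memory block is still online, not removed\n");
 *
 * Callers that already hold device_hotplug_lock use __remove_memory(), which
 * BUG()s if the range cannot be removed.
 */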

static int try_offline_memory_block(struct memory_block *mem, void *arg)
{
        uint8_t online_type = MMOP_ONLINE_KERNEL;
        uint8_t **online_types = arg;
        struct page *page;
        int rc;

        /*
         * Sense the online_type via the zone of the memory block. Offlining
         * with multiple zones within one memory block will be rejected
         * by offlining code ... so we don't care about that.
         */
        page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
        if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
                online_type = MMOP_ONLINE_MOVABLE;

        rc = device_offline(&mem->dev);
        /*
         * Default is MMOP_OFFLINE - change it only if offlining succeeded,
         * so try_reonline_memory_block() can do the right thing.
         */
        if (!rc)
                **online_types = online_type;

        (*online_types)++;
        /* Ignore if already offline. */
        return rc < 0 ? rc : 0;
}

static int try_reonline_memory_block(struct memory_block *mem, void *arg)
{
        uint8_t **online_types = arg;
        int rc;

        if (**online_types != MMOP_OFFLINE) {
                mem->online_type = **online_types;
                rc = device_online(&mem->dev);
                if (rc < 0)
                        pr_warn("%s: Failed to re-online memory: %d\n",
                                __func__, rc);
        }

        /* Continue processing all remaining memory blocks. */
        (*online_types)++;
        return 0;
}

/*
 * Try to offline and remove memory. Might take a long time to finish in case
 * memory is still in use. Primarily useful for memory devices that logically
 * unplugged all memory (so it's no longer in use) and want to offline + remove
 * that memory.
 */
int offline_and_remove_memory(u64 start, u64 size)
{
        const unsigned long mb_count = size / memory_block_size_bytes();
        uint8_t *online_types, *tmp;
        int rc;

        if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
            !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
                return -EINVAL;

        /*
         * We'll remember the old online type of each memory block, so we can
         * try to revert whatever we did when offlining one memory block fails
         * after offlining some others succeeded.
         */
        online_types = kmalloc_array(mb_count, sizeof(*online_types),
                                     GFP_KERNEL);
        if (!online_types)
                return -ENOMEM;
        /*
         * Initialize all states to MMOP_OFFLINE, so when we abort processing in
         * try_offline_memory_block(), we'll skip all unprocessed blocks in
         * try_reonline_memory_block().
         */
        memset(online_types, MMOP_OFFLINE, mb_count);

        lock_device_hotplug();

        tmp = online_types;
        rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);

        /*
         * If we succeeded in offlining all memory, remove it. This cannot
         * fail as it cannot get onlined in the meantime.
         */
        if (!rc) {
                rc = try_remove_memory(start, size);
                if (rc)
                        pr_err("%s: Failed to remove memory: %d\n", __func__, rc);
        }

        /*
         * Rollback what we did. While memory onlining might theoretically fail
         * (nacked by a notifier), it barely ever happens.
         */
        if (rc) {
                tmp = online_types;
                walk_memory_blocks(start, size, &tmp,
                                   try_reonline_memory_block);
        }
        unlock_device_hotplug();

        kfree(online_types);
        return rc;
}
EXPORT_SYMBOL_GPL(offline_and_remove_memory);
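
/*
 * Illustrative example: a paravirtualized memory driver (virtio-mem is the
 * in-tree user) that has logically unplugged a memory block can hand it back
 * like this; "addr" is a placeholder for a memory-block-aligned address the
 * driver added earlier.
 *
 *	rc = offline_and_remove_memory(addr, memory_block_size_bytes());
 *	if (rc)
 *		pr_debug("block still in use, trying again later\n");
 */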
#endif /* CONFIG_MEMORY_HOTREMOVE */