// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
#endif

enum {
	ONLINE_POLICY_CONTIG_ZONES = 0,
	ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
	[ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
	[ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

static int set_online_policy(const char *val, const struct kernel_param *kp)
{
	int ret = sysfs_match_string(online_policy_to_str, val);

	if (ret < 0)
		return ret;
	*((int *)kp->arg) = ret;
	return 0;
}

static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *		   (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
	.set = set_online_policy,
	.get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
		"Set the online policy (\"contig-zones\", \"auto-movable\") "
		"Default: \"contig-zones\"");

/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
		"Set the maximum ratio of MOVABLE:KERNEL memory in the system "
		"in percent for \"auto-movable\" online policy. Default: 301");

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
		"Consider numa node stats in addition to global stats in "
		"\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}
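
/*
 * Illustrative sketch (not part of this file): readers pin the hotplug
 * state via the reader side of mem_hotplug_lock, while online_pages()/
 * offline_pages() run under the writer side taken in mem_hotplug_begin():
 *
 *	get_online_mems();
 *	... walk sections/pages that must not be (un)plugged concurrently ...
 *	put_online_mems();
 */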

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure the value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to the documentation of 'mem=' in kernel-parameters.txt for
	 * more details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
				start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
		const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
		     reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}

/*
 * Return the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);
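
/*
 * Illustrative sketch (not part of this file): a typical pfn walker skips
 * holes and offline ranges with pfn_to_online_page() instead of open-coding
 * pfn_valid() + pfn_to_page():
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		(page->flags and the zone linkage are fully initialized here)
 *	}
 */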

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!params->pgprot.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}
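
/*
 * Illustrative sketch (not part of this file): an architecture's
 * arch_add_memory() typically sets up the direct/linear mapping first and
 * then hands the range to __add_pages(); roughly (helper name hypothetical):
 *
 *	int arch_add_memory(int nid, u64 start, u64 size,
 *			    struct mhp_params *params)
 *	{
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *		int ret;
 *
 *		ret = arch_create_linear_mapping(start, size, params);
 *		if (ret)
 *			return ret;
 *		return __add_pages(nid, start_pfn, nr_pages, params);
 *	}
 */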

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}

static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_idx(zone) == ZONE_DEVICE)
		return;
#endif

	clear_zone_contiguous(zone);

	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);

	set_zone_contiguous(zone);
}

static void __remove_section(unsigned long pfn, unsigned long nr_pages,
			     unsigned long map_offset,
			     struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	unsigned long map_offset = 0;

	map_offset = vmem_altmap_offset(altmap);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		__remove_section(pfn, cur_nr_pages, map_offset, altmap);
		map_offset = 0;
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
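
/*
 * Illustrative sketch (not part of this file): a driver that wants to
 * intercept onlining of hot-added pages (the Hyper-V balloon does this)
 * registers a callback and restores the generic one when done:
 *
 *	static void my_online_page(struct page *page, unsigned int order)
 *	{
 *		if (!defer_page(page, order))		// hypothetical policy
 *			generic_online_page(page, order);
 *	}
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */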

void generic_online_page(struct page *page, unsigned int order)
{
	/*
	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
	 * so we should map it first. This is better than introducing a special
	 * case in page freeing fast path.
	 */
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		int order = min(MAX_ORDER - 1UL, __ffs(pfn));

		(*online_page_callback)(pfn_to_page(pfn), order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}
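
/*
 * Worked example (illustrative, assuming 4 KiB pages, pageblock order 9 and
 * the default MAX_ORDER of 11): for a memmap_on_memory range starting at
 * pfn 0x8200, __ffs(0x8200) == 9, so the first chunk is one pageblock (512
 * pages); at pfn 0x8400, min(MAX_ORDER - 1, __ffs(0x8400)) == 10, so
 * subsequent chunks are 1024 pages. Since end_pfn is section-aligned for
 * online_pages(), the chunks never overshoot the range.
 */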

/* check which states of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages,
				  struct vmem_altmap *altmap, int migratetype)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;

	clear_zone_contiguous(zone);

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			 MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
}

struct auto_movable_stats {
	unsigned long kernel_early_pages;
	unsigned long movable_pages;
};

static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
					    struct zone *zone)
{
	if (zone_idx(zone) == ZONE_MOVABLE) {
		stats->movable_pages += zone->present_pages;
	} else {
		stats->kernel_early_pages += zone->present_early_pages;
#ifdef CONFIG_CMA
		/*
		 * CMA pages (never on hotplugged memory) behave like
		 * ZONE_MOVABLE.
		 */
		stats->movable_pages += zone->cma_pages;
		stats->kernel_early_pages -= zone->cma_pages;
#endif /* CONFIG_CMA */
	}
}

static bool auto_movable_can_online_movable(int nid, unsigned long nr_pages)
{
	struct auto_movable_stats stats = {};
	unsigned long kernel_early_pages, movable_pages;
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *zone;
	int i;

	/* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
	if (nid == NUMA_NO_NODE) {
		/* TODO: cache values */
		for_each_populated_zone(zone)
			auto_movable_stats_account_zone(&stats, zone);
	} else {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			zone = pgdat->node_zones + i;
			if (populated_zone(zone))
				auto_movable_stats_account_zone(&stats, zone);
		}
	}

	kernel_early_pages = stats.kernel_early_pages;
	movable_pages = stats.movable_pages;

	/*
	 * Test if we could online the given number of pages to ZONE_MOVABLE
	 * and still stay in the configured ratio.
	 */
	movable_pages += nr_pages;
	return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
}
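
/*
 * Worked example (illustrative): with the default auto_movable_ratio of 301
 * and 8 GiB of early KERNEL memory, MOVABLE may grow to
 * (301 * 8 GiB) / 100, i.e. about 24 GiB. A request that would push the
 * MOVABLE total beyond that limit makes this function return false, and the
 * memory is onlined to a kernel zone instead.
 */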

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

/*
 * Determine to which zone to online memory dynamically based on user
 * configuration and system stats. We care about the following ratio:
 *
 *   MOVABLE : KERNEL
 *
 * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
 * one of the kernel zones. CMA pages inside one of the kernel zones really
 * behave like ZONE_MOVABLE, so we treat them accordingly.
 *
 * We don't allow for hotplugged memory in a KERNEL zone to increase the
 * amount of MOVABLE memory we can have, so we end up with:
 *
 *   MOVABLE : KERNEL_EARLY
 *
 * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
 * boot. We base our calculation on KERNEL_EARLY internally, because:
 *
 * a) Hotplugged memory in one of the kernel zones can sometimes still get
 *    hotunplugged, especially when hot(un)plugging individual memory blocks.
 *    There is no coordination across memory devices, therefore "automatic"
 *    hotunplugging, as implemented in hypervisors, could result in zone
 *    imbalances.
 * b) Early/boot memory in one of the kernel zones can usually not get
 *    hotunplugged again (e.g., no firmware interface to unplug, fragmented
 *    with unmovable allocations). While there are corner cases where it might
 *    still work, it is barely relevant in practice.
 *
 * We rely on "present pages" instead of "managed pages", as the latter is
 * highly unreliable and dynamic in virtualized environments, and does not
 * consider boot time allocations. For example, memory ballooning adjusts the
 * managed pages when inflating/deflating the balloon, and balloon compaction
 * can even migrate inflated pages between zones.
 *
 * Using "present pages" is better but some things to keep in mind are:
 *
 * a) Some memblock allocations, such as for the crashkernel area, are
 *    effectively unused by the kernel, yet they account to "present pages".
 *    Fortunately, these allocations are comparatively small in relevant setups
 *    (e.g., fraction of system memory).
 * b) Some hotplugged memory blocks in virtualized environments, especially
 *    hotplugged by virtio-mem, look like they are completely present, however,
 *    only parts of the memory block are actually currently usable.
 *    "present pages" is an upper limit that can get reached at runtime. As
 *    we base our calculations on KERNEL_EARLY, this is not an issue.
 */
static struct zone *auto_movable_zone_for_pfn(int nid, unsigned long pfn,
					      unsigned long nr_pages)
{
	if (!auto_movable_ratio)
		goto kernel_zone;

	if (!auto_movable_can_online_movable(NUMA_NO_NODE, nr_pages))
		goto kernel_zone;

#ifdef CONFIG_NUMA
	if (auto_movable_numa_aware &&
	    !auto_movable_can_online_movable(nid, nr_pages))
		goto kernel_zone;
#endif /* CONFIG_NUMA */

	return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
kernel_zone:
	return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
		return auto_movable_zone_for_pfn(nid, start_pfn, nr_pages);

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * This function should only be called by memory_block_{online,offline},
 * and {online,offline}_pages.
 */
void adjust_present_page_count(struct page *page, long nr_pages)
{
	struct zone *zone = page_zone(page);

	/*
	 * We only support onlining/offlining/adding/removing of complete
	 * memory blocks; therefore, all pages within a block are either
	 * early or hotplugged.
	 */
	if (early_section(__pfn_to_section(page_to_pfn(page))))
		zone->present_early_pages += nr_pages;
	zone->present_pages += nr_pages;
	zone->zone_pgdat->node_present_pages += nr_pages;
}

int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
			      struct zone *zone)
{
	unsigned long end_pfn = pfn + nr_pages;
	int ret;

	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
	if (ret)
		return ret;

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections online here as otherwise they will be
	 * left offline.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	return ret;
}

void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = pfn + nr_pages;

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections offline here as otherwise they will be
	 * left online.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	/*
	 * The pages associated with this vmemmap have been offlined, so
	 * we can reset its state here.
	 */
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
{
	unsigned long flags;
	int need_zonelists_rebuild = 0;
	const int nid = zone_to_nid(zone);
	int ret;
	struct memory_notify arg;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial
	 * part of the physical memory space for vmemmaps. That space is
	 * pageblock aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages ||
			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	mem_hotplug_begin();

	/* associate pfn range with the zone */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * Fixup the number of isolated pageblocks before marking the sections
	 * online, such that undo_isolate_page_range() works correctly.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	online_pages_range(pfn, nr_pages);
	adjust_present_page_count(pfn_to_page(pfn), nr_pages);

	node_states_set_node(nid, &arg);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);

	/* Basic onlining is complete, allow allocation of onlined pages. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);

	/*
	 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
	 * the tail of the freelist when undoing isolation). Shuffle the whole
	 * zone to make sure the just onlined pages are properly distributed
	 * across the whole freelist - to create an initial shuffle.
	 */
	shuffle_zone(zone);

	/* reinitialise watermarks and update pcp limits */
	init_per_zone_wmark_min();

	kswapd_run(nid);
	kcompactd_run(nid);

	writeback_set_ratelimit();

	memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid)
{
	struct pglist_data *pgdat;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
		arch_refresh_nodedata(nid, pgdat);
	} else {
		int cpu;
		/*
		 * Reset the nr_zones, order and highest_zoneidx before reuse.
		 * Note that kswapd will init kswapd_highest_zoneidx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_highest_zoneidx = 0;
		for_each_online_cpu(cpu) {
			struct per_cpu_nodestat *p;

			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
			memset(p, 0, sizeof(*p));
		}
	}

	/* we can use NODE_DATA(nid) from here */
	pgdat->node_id = nid;
	pgdat->node_start_pfn = 0;

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_core_hotplug(nid);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}


/*
 * __try_online_node - online a node if offlined
 * @nid: the node ID
 * @set_node_online: Whether we want to online the node
 * called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	mem->online_type = mhp_default_online_type;
	return device_online(&mem->dev);
}

bool mhp_supports_memmap_on_memory(unsigned long size)
{
	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
	unsigned long remaining_size = size - vmemmap_size;

	/*
	 * Besides having arch support and the feature enabled at runtime, we
	 * need a few more assumptions to hold true:
	 *
	 * a) We span a single memory block: memory onlining/offlining happens
	 *    in memory block granularity. We don't want the vmemmap of online
	 *    memory blocks to reside on offline memory blocks. In the future,
	 *    we might want to support variable-sized memory blocks to make the
	 *    feature more versatile.
	 *
	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
	 *    to populate memory from the altmap for unrelated parts (i.e.,
	 *    other memory blocks)
	 *
	 * c) The vmemmap pages (and thereby the pages that will be exposed to
	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
	 *    code requires applicable ranges to be page-aligned, for example, to
	 *    set the migratetypes properly.
	 *
	 * TODO: Although we have a check here to make sure that vmemmap pages
	 *       fully populate a PMD, it is not the right place to check for
	 *       this. A much better solution involves improving vmemmap code
	 *       to fallback to base pages when trying to populate vmemmap using
	 *       altmap as an alternative source of memory, and we do not exactly
	 *       populate a single PMD.
	 */
	return memmap_on_memory &&
	       !hugetlb_free_vmemmap_enabled &&
	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
	       size == memory_block_size_bytes() &&
	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
}
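
/*
 * Worked example (illustrative, assuming a 64 byte struct page and x86-64
 * defaults): a 128 MiB memory block has 32768 pages, so its vmemmap needs
 * 32768 * 64 bytes == 2 MiB - exactly one PMD. The remaining 126 MiB is a
 * multiple of the 2 MiB pageblock size, so the alignment checks above pass.
 */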

/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
	struct vmem_altmap mhp_altmap = {};
	struct memory_group *group = NULL;
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	if (mhp_flags & MHP_NID_IS_MGID) {
		group = memory_group_find_by_id(nid);
		if (!group)
			return -EINVAL;
		nid = group->nid;
	}

	if (!node_possible(nid)) {
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
		return -EINVAL;
	}

	mem_hotplug_begin();

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/*
	 * Self hosted memmap array
	 */
	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
		if (!mhp_supports_memmap_on_memory(size)) {
			ret = -EINVAL;
			goto error;
		}
		mhp_altmap.free = PHYS_PFN(size);
		mhp_altmap.base_pfn = PHYS_PFN(start);
		params.altmap = &mhp_altmap;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &params);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
					  group);
	if (ret) {
		arch_remove_memory(start, size, NULL);
		goto error;
	}

	if (new_node) {
		/* If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no rollback now,
		 * so catch it with BUG_ON().
		 * We online the node here; we can't roll back from here.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node.*/
	link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
			  MEMINIT_HOTPLUG);

	/* create new memmap entry */
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/*
	 * In case we're allowed to merge the resource, flag it and trigger
	 * merging now that adding succeeded.
	 */
	if (mhp_flags & MHP_MERGE_RESOURCE)
		merge_system_ram_resource(res);

	/* online pages if requested */
	if (mhp_default_online_type != MMOP_OFFLINE)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}

/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, mhp_flags);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}

b6117199 | 1381 | int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags) |
8df1d0e4 DH |
1382 | { |
1383 | int rc; | |
1384 | ||
1385 | lock_device_hotplug(); | |
b6117199 | 1386 | rc = __add_memory(nid, start, size, mhp_flags); |
8df1d0e4 DH |
1387 | unlock_device_hotplug(); |
1388 | ||
1389 | return rc; | |
1390 | } | |
bc02af93 | 1391 | EXPORT_SYMBOL_GPL(add_memory); |
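/*
 * Hypothetical usage sketch (not part of this file): a memory device
 * driver that discovered a new block-aligned range could hot-add it
 * roughly like this; my_dev_add_range() and its arguments are made up.
 *
 *	static int my_dev_add_range(int nid, u64 start, u64 size)
 *	{
 *		if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
 *		    !IS_ALIGNED(size, memory_block_size_bytes()))
 *			return -EINVAL;
 *		return add_memory(nid, start, size, MHP_NONE);
 *	}
 */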
0c0e6195 | 1392 | |
7b7b2721 DH |
1393 | /* |
1394 | * Add special, driver-managed memory to the system as system RAM. Such | |
1395 | * memory is not exposed via the raw firmware-provided memmap as system | |
1396 | * RAM; instead, it is detected and added by a driver - during cold boot, |
1397 | * after a reboot, and after kexec. | |
1398 | * | |
1399 | * Reasons why this memory should not be used for the initial memmap of a | |
1400 | * kexec kernel or for placing kexec images: | |
1401 | * - The booting kernel is in charge of determining how this memory will be | |
1402 | * used (e.g., use persistent memory as system RAM) | |
1403 | * - Coordination with a hypervisor is required before this memory | |
1404 | * can be used (e.g., inaccessible parts). | |
1405 | * | |
1406 | * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided | |
1407 | * memory map") are created. Also, the created memory resource is flagged | |
7cf603d1 | 1408 | * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case |
7b7b2721 DH |
1409 | * this memory as well (esp., not place kexec images onto it). |
1410 | * | |
1411 | * The resource_name (visible via /proc/iomem) has to have the format | |
1412 | * "System RAM ($DRIVER)". | |
1413 | */ | |
1414 | int add_memory_driver_managed(int nid, u64 start, u64 size, | |
b6117199 | 1415 | const char *resource_name, mhp_t mhp_flags) |
7b7b2721 DH |
1416 | { |
1417 | struct resource *res; | |
1418 | int rc; | |
1419 | ||
1420 | if (!resource_name || | |
1421 | strstr(resource_name, "System RAM (") != resource_name || | |
1422 | resource_name[strlen(resource_name) - 1] != ')') | |
1423 | return -EINVAL; | |
1424 | ||
1425 | lock_device_hotplug(); | |
1426 | ||
1427 | res = register_memory_resource(start, size, resource_name); | |
1428 | if (IS_ERR(res)) { | |
1429 | rc = PTR_ERR(res); | |
1430 | goto out_unlock; | |
1431 | } | |
1432 | ||
b6117199 | 1433 | rc = add_memory_resource(nid, res, mhp_flags); |
7b7b2721 DH |
1434 | if (rc < 0) |
1435 | release_memory_resource(res); | |
1436 | ||
1437 | out_unlock: | |
1438 | unlock_device_hotplug(); | |
1439 | return rc; | |
1440 | } | |
1441 | EXPORT_SYMBOL_GPL(add_memory_driver_managed); | |
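/*
 * Illustrative call (modeled on how dax/kmem-style drivers name their
 * resource to satisfy the "System RAM ($DRIVER)" format check above):
 *
 *	rc = add_memory_driver_managed(nid, range_start, range_len,
 *				       "System RAM (kmem)", MHP_NONE);
 */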
1442 | ||
bca3feaa AK |
1443 | /* |
1444 | * Platforms should define arch_get_mappable_range(), which provides |
1445 | * the maximum possible addressable physical memory range for which the |
1446 | * linear mapping could be created. The platform-returned address |
1447 | * range must adhere to the following semantics: |
1448 | * | |
1449 | * - range.start <= range.end | |
1450 | * - Range includes both end points [range.start..range.end] | |
1451 | * | |
1452 | * There is also a fallback definition provided here, allowing the | |
1453 | * entire possible physical address range in case any platform does | |
1454 | * not define arch_get_mappable_range(). | |
1455 | */ | |
1456 | struct range __weak arch_get_mappable_range(void) | |
1457 | { | |
1458 | struct range mhp_range = { | |
1459 | .start = 0UL, | |
1460 | .end = -1ULL, | |
1461 | }; | |
1462 | return mhp_range; | |
1463 | } | |
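/*
 * Sketch of a platform override (illustrative only; the 48-bit limit
 * below is a made-up assumption, real architectures derive the bound
 * from their linear map layout):
 *
 *	struct range arch_get_mappable_range(void)
 *	{
 *		return (struct range) {
 *			.start = 0,
 *			.end = (1ULL << 48) - 1,
 *		};
 *	}
 */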
1464 | ||
1465 | struct range mhp_get_pluggable_range(bool need_mapping) | |
1466 | { | |
1467 | const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; | |
1468 | struct range mhp_range; | |
1469 | ||
1470 | if (need_mapping) { | |
1471 | mhp_range = arch_get_mappable_range(); | |
1472 | if (mhp_range.start > max_phys) { | |
1473 | mhp_range.start = 0; | |
1474 | mhp_range.end = 0; | |
1475 | } | |
1476 | mhp_range.end = min_t(u64, mhp_range.end, max_phys); | |
1477 | } else { | |
1478 | mhp_range.start = 0; | |
1479 | mhp_range.end = max_phys; | |
1480 | } | |
1481 | return mhp_range; | |
1482 | } | |
1483 | EXPORT_SYMBOL_GPL(mhp_get_pluggable_range); | |
1484 | ||
1485 | bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) | |
1486 | { | |
1487 | struct range mhp_range = mhp_get_pluggable_range(need_mapping); | |
1488 | u64 end = start + size; | |
1489 | ||
1490 | if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) | |
1491 | return true; | |
1492 | ||
1493 | pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", | |
1494 | start, end, mhp_range.start, mhp_range.end); | |
1495 | return false; | |
1496 | } | |
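/*
 * Typical caller pattern (illustrative): validate a candidate range
 * before adding memory that needs a linear mapping:
 *
 *	if (!mhp_range_allowed(start, size, true))
 *		return -ERANGE;
 */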
1497 | ||
0c0e6195 KH |
1498 | #ifdef CONFIG_MEMORY_HOTREMOVE |
1499 | /* | |
92917998 DH |
1500 | * Confirm all pages in a range [start, end) belong to the same zone (skipping |
1501 | * memory holes). If so, return the zone; otherwise return NULL. |
0c0e6195 | 1502 | */ |
92917998 DH |
1503 | struct zone *test_pages_in_a_zone(unsigned long start_pfn, |
1504 | unsigned long end_pfn) | |
0c0e6195 | 1505 | { |
5f0f2887 | 1506 | unsigned long pfn, sec_end_pfn; |
0c0e6195 KH |
1507 | struct zone *zone = NULL; |
1508 | struct page *page; | |
673d40c8 | 1509 | |
deb88a2a | 1510 | for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1); |
0c0e6195 | 1511 | pfn < end_pfn; |
deb88a2a | 1512 | pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) { |
5f0f2887 AB |
1513 | /* Make sure the memory section is present first */ |
1514 | if (!present_section_nr(pfn_to_section_nr(pfn))) | |
0c0e6195 | 1515 | continue; |
5f0f2887 AB |
1516 | for (; pfn < sec_end_pfn && pfn < end_pfn; |
1517 | pfn += MAX_ORDER_NR_PAGES) { | |
24feb47c | 1518 | /* Check if we got outside the zone */ |
673d40c8 | 1519 | if (zone && !zone_spans_pfn(zone, pfn)) |
92917998 | 1520 | return NULL; |
673d40c8 | 1521 | page = pfn_to_page(pfn); |
5f0f2887 | 1522 | if (zone && page_zone(page) != zone) |
92917998 | 1523 | return NULL; |
5f0f2887 AB |
1524 | zone = page_zone(page); |
1525 | } | |
0c0e6195 | 1526 | } |
deb88a2a | 1527 | |
92917998 | 1528 | return zone; |
0c0e6195 KH |
1529 | } |
1530 | ||
1531 | /* | |
0efadf48 | 1532 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, |
aa218795 DH |
1533 | * non-lru movable pages and hugepages). Will skip over most unmovable |
1534 | * pages (esp., pages that can be skipped when offlining), but bail out on | |
1535 | * definitely unmovable pages. | |
1536 | * | |
1537 | * Returns: | |
1538 | * 0 in case a movable page is found and movable_pfn was updated. | |
1539 | * -ENOENT in case no movable page was found. | |
1540 | * -EBUSY in case a definitely unmovable page was found. | |
0c0e6195 | 1541 | */ |
aa218795 DH |
1542 | static int scan_movable_pages(unsigned long start, unsigned long end, |
1543 | unsigned long *movable_pfn) | |
0c0e6195 KH |
1544 | { |
1545 | unsigned long pfn; | |
eeb0efd0 | 1546 | |
0c0e6195 | 1547 | for (pfn = start; pfn < end; pfn++) { |
eeb0efd0 OS |
1548 | struct page *page, *head; |
1549 | unsigned long skip; | |
1550 | ||
1551 | if (!pfn_valid(pfn)) | |
1552 | continue; | |
1553 | page = pfn_to_page(pfn); | |
1554 | if (PageLRU(page)) | |
aa218795 | 1555 | goto found; |
eeb0efd0 | 1556 | if (__PageMovable(page)) |
aa218795 DH |
1557 | goto found; |
1558 | ||
1559 | /* | |
1560 | * PageOffline() pages that are not marked __PageMovable() and | |
1561 | * have a reference count > 0 (after MEM_GOING_OFFLINE) are | |
1562 | * definitely unmovable. If their reference count would be 0, | |
1563 | * they could at least be skipped when offlining memory. | |
1564 | */ | |
1565 | if (PageOffline(page) && page_count(page)) | |
1566 | return -EBUSY; | |
eeb0efd0 OS |
1567 | |
1568 | if (!PageHuge(page)) | |
1569 | continue; | |
1570 | head = compound_head(page); | |
8f251a3d MK |
1571 | /* |
1572 | * This test is racy as we hold no reference or lock. The | |
1573 | * hugetlb page could have been freed, so that head is no longer |
1574 | * a hugetlb page before the following check. In such unlikely | |
1575 | * cases false positives and negatives are possible. Calling | |
1576 | * code must deal with these scenarios. | |
1577 | */ | |
1578 | if (HPageMigratable(head)) | |
aa218795 | 1579 | goto found; |
d8c6546b | 1580 | skip = compound_nr(head) - (page - head); |
eeb0efd0 | 1581 | pfn += skip - 1; |
0c0e6195 | 1582 | } |
aa218795 DH |
1583 | return -ENOENT; |
1584 | found: | |
1585 | *movable_pfn = pfn; | |
0c0e6195 KH |
1586 | return 0; |
1587 | } | |
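/*
 * Illustrative caller loop (the real one lives in offline_pages()
 * below): keep migrating until no movable page is left or a definitely
 * unmovable page is hit:
 *
 *	pfn = start_pfn;
 *	while (!scan_movable_pages(pfn, end_pfn, &pfn))
 *		do_migrate_range(pfn, end_pfn);
 */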
1588 | ||
0c0e6195 KH |
1589 | static int |
1590 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |
1591 | { | |
1592 | unsigned long pfn; | |
6c357848 | 1593 | struct page *page, *head; |
0c0e6195 KH |
1594 | int ret = 0; |
1595 | LIST_HEAD(source); | |
786dee86 LM |
1596 | static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL, |
1597 | DEFAULT_RATELIMIT_BURST); | |
0c0e6195 | 1598 | |
a85009c3 | 1599 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
0c0e6195 KH |
1600 | if (!pfn_valid(pfn)) |
1601 | continue; | |
1602 | page = pfn_to_page(pfn); | |
6c357848 | 1603 | head = compound_head(page); |
c8721bbb NH |
1604 | |
1605 | if (PageHuge(page)) { | |
d8c6546b | 1606 | pfn = page_to_pfn(head) + compound_nr(head) - 1; |
daf3538a | 1607 | isolate_huge_page(head, &source); |
c8721bbb | 1608 | continue; |
94723aaf | 1609 | } else if (PageTransHuge(page)) |
6c357848 | 1610 | pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; |
c8721bbb | 1611 | |
b15c8726 MH |
1612 | /* |
1613 | * HWPoison pages have elevated reference counts so the migration would | |
1614 | * fail on them. It also doesn't make any sense to migrate them in the | |
1615 | * first place. Still try to unmap such a page in case it is still mapped | |
1616 | * (e.g. the current hwpoison implementation doesn't unmap KSM pages, so |
1617 | * keep the unmap as the catch-all safety net). |
1618 | */ | |
1619 | if (PageHWPoison(page)) { | |
1620 | if (WARN_ON(PageLRU(page))) | |
1621 | isolate_lru_page(page); | |
1622 | if (page_mapped(page)) | |
013339df | 1623 | try_to_unmap(page, TTU_IGNORE_MLOCK); |
b15c8726 MH |
1624 | continue; |
1625 | } | |
1626 | ||
700c2a46 | 1627 | if (!get_page_unless_zero(page)) |
0c0e6195 KH |
1628 | continue; |
1629 | /* | |
0efadf48 YX |
1630 | * We can skip free pages. We can also deal with LRU pages |
1631 | * and non-LRU movable pages. |
0c0e6195 | 1632 | */ |
0efadf48 YX |
1633 | if (PageLRU(page)) |
1634 | ret = isolate_lru_page(page); | |
1635 | else | |
1636 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | |
0c0e6195 | 1637 | if (!ret) { /* Success */ |
62695a84 | 1638 | list_add_tail(&page->lru, &source); |
0efadf48 YX |
1639 | if (!__PageMovable(page)) |
1640 | inc_node_page_state(page, NR_ISOLATED_ANON + | |
9de4f22a | 1641 | page_is_file_lru(page)); |
6d9c285a | 1642 | |
0c0e6195 | 1643 | } else { |
786dee86 LM |
1644 | if (__ratelimit(&migrate_rs)) { |
1645 | pr_warn("failed to isolate pfn %lx\n", pfn); | |
1646 | dump_page(page, "isolation failed"); | |
1647 | } | |
0c0e6195 | 1648 | } |
1723058e | 1649 | put_page(page); |
0c0e6195 | 1650 | } |
f3ab2636 | 1651 | if (!list_empty(&source)) { |
203e6e5c JK |
1652 | nodemask_t nmask = node_states[N_MEMORY]; |
1653 | struct migration_target_control mtc = { | |
1654 | .nmask = &nmask, | |
1655 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, | |
1656 | }; | |
1657 | ||
1658 | /* | |
1659 | * We have checked that the migration range is within a single |
1660 | * zone, so we can use the nid of the first page for all the others. |
1661 | */ | |
1662 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); | |
1663 | ||
1664 | /* | |
1665 | * try to allocate from a different node but reuse this node | |
1666 | * if there are no other online nodes to be used (e.g. we are | |
1667 | * offlining a part of the only existing node) | |
1668 | */ | |
1669 | node_clear(mtc.nid, nmask); | |
1670 | if (nodes_empty(nmask)) | |
1671 | node_set(mtc.nid, nmask); | |
1672 | ret = migrate_pages(&source, alloc_migration_target, NULL, | |
1673 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | |
2932c8b0 MH |
1674 | if (ret) { |
1675 | list_for_each_entry(page, &source, lru) { | |
786dee86 LM |
1676 | if (__ratelimit(&migrate_rs)) { |
1677 | pr_warn("migrating pfn %lx failed ret:%d\n", | |
1678 | page_to_pfn(page), ret); | |
1679 | dump_page(page, "migration failure"); | |
1680 | } | |
2932c8b0 | 1681 | } |
c8721bbb | 1682 | putback_movable_pages(&source); |
2932c8b0 | 1683 | } |
0c0e6195 | 1684 | } |
1723058e | 1685 | |
0c0e6195 KH |
1686 | return ret; |
1687 | } | |
1688 | ||
c5320926 TC |
1689 | static int __init cmdline_parse_movable_node(char *p) |
1690 | { | |
55ac590c | 1691 | movable_node_enabled = true; |
c5320926 TC |
1692 | return 0; |
1693 | } | |
1694 | early_param("movable_node", cmdline_parse_movable_node); | |
1695 | ||
d9713679 LJ |
1696 | /* check which states of node_states will be changed when offlining memory */ |
1697 | static void node_states_check_changes_offline(unsigned long nr_pages, | |
1698 | struct zone *zone, struct memory_notify *arg) | |
1699 | { | |
1700 | struct pglist_data *pgdat = zone->zone_pgdat; | |
1701 | unsigned long present_pages = 0; | |
86b27bea | 1702 | enum zone_type zt; |
d9713679 | 1703 | |
98fa15f3 AK |
1704 | arg->status_change_nid = NUMA_NO_NODE; |
1705 | arg->status_change_nid_normal = NUMA_NO_NODE; | |
1706 | arg->status_change_nid_high = NUMA_NO_NODE; | |
d9713679 LJ |
1707 | |
1708 | /* | |
86b27bea OS |
1709 | * Check whether node_states[N_NORMAL_MEMORY] will be changed. |
1710 | * If the memory to be offlined is within the range |
1711 | * [0..ZONE_NORMAL], and it is the last present memory there, | |
1712 | * the zones in that range will become empty after the offlining, | |
1713 | * thus we can determine that we need to clear the node from | |
1714 | * node_states[N_NORMAL_MEMORY]. | |
d9713679 | 1715 | */ |
86b27bea | 1716 | for (zt = 0; zt <= ZONE_NORMAL; zt++) |
d9713679 | 1717 | present_pages += pgdat->node_zones[zt].present_pages; |
86b27bea | 1718 | if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) |
d9713679 | 1719 | arg->status_change_nid_normal = zone_to_nid(zone); |
d9713679 | 1720 | |
6715ddf9 LJ |
1721 | #ifdef CONFIG_HIGHMEM |
1722 | /* | |
86b27bea OS |
1723 | * node_states[N_HIGH_MEMORY] contains nodes which |
1724 | * have normal memory or high memory. | |
1725 | * Here we add the present_pages belonging to ZONE_HIGHMEM. | |
1726 | * If the zone is within the range [0..ZONE_HIGHMEM], and |
1727 | * we determine that the zones in that range become empty, | |
1728 | * we need to clear the node for N_HIGH_MEMORY. | |
6715ddf9 | 1729 | */ |
86b27bea OS |
1730 | present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages; |
1731 | if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) | |
6715ddf9 | 1732 | arg->status_change_nid_high = zone_to_nid(zone); |
6715ddf9 LJ |
1733 | #endif |
1734 | ||
d9713679 | 1735 | /* |
86b27bea OS |
1736 | * We have accounted the pages from [0..ZONE_NORMAL), and |
1737 | * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM | |
1738 | * as well. | |
1739 | * Here we count the possible pages from ZONE_MOVABLE. | |
1740 | * If after having accounted all the pages, we see that the nr_pages | |
1741 | * to be offlined is over or equal to the accounted pages, | |
1742 | * we know that the node will become empty, and so, we can clear | |
1743 | * it for N_MEMORY as well. | |
d9713679 | 1744 | */ |
86b27bea | 1745 | present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; |
d9713679 | 1746 | |
d9713679 LJ |
1747 | if (nr_pages >= present_pages) |
1748 | arg->status_change_nid = zone_to_nid(zone); | |
d9713679 LJ |
1749 | } |
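/*
 * Worked example (made-up sizes): a node whose only memory is 256 MiB
 * of ZONE_NORMAL. Offlining all of it makes nr_pages equal to the
 * accumulated present_pages, so both status_change_nid_normal and
 * status_change_nid get set, and node_states_clear_node() will later
 * clear the node from N_NORMAL_MEMORY and N_MEMORY.
 */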
1750 | ||
1751 | static void node_states_clear_node(int node, struct memory_notify *arg) | |
1752 | { | |
1753 | if (arg->status_change_nid_normal >= 0) | |
1754 | node_clear_state(node, N_NORMAL_MEMORY); | |
1755 | ||
cf01f6f5 | 1756 | if (arg->status_change_nid_high >= 0) |
d9713679 | 1757 | node_clear_state(node, N_HIGH_MEMORY); |
6715ddf9 | 1758 | |
cf01f6f5 | 1759 | if (arg->status_change_nid >= 0) |
6715ddf9 | 1760 | node_clear_state(node, N_MEMORY); |
d9713679 LJ |
1761 | } |
1762 | ||
c5e79ef5 DH |
1763 | static int count_system_ram_pages_cb(unsigned long start_pfn, |
1764 | unsigned long nr_pages, void *data) | |
1765 | { | |
1766 | unsigned long *nr_system_ram_pages = data; | |
1767 | ||
1768 | *nr_system_ram_pages += nr_pages; | |
1769 | return 0; | |
1770 | } | |
1771 | ||
73a11c96 | 1772 | int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) |
0c0e6195 | 1773 | { |
73a11c96 | 1774 | const unsigned long end_pfn = start_pfn + nr_pages; |
0a1a9a00 | 1775 | unsigned long pfn, system_ram_pages = 0; |
d702909f | 1776 | unsigned long flags; |
0c0e6195 | 1777 | struct zone *zone; |
7b78d335 | 1778 | struct memory_notify arg; |
ea15153c | 1779 | int ret, node; |
79605093 | 1780 | char *reason; |
0c0e6195 | 1781 | |
dd8e2f23 OS |
1782 | /* |
1783 | * {on,off}lining is constrained to full memory sections (or more | |
041711ce | 1784 | * precisely to memory blocks from the user space POV). |
dd8e2f23 OS |
1785 | * memmap_on_memory is an exception because it reserves the initial part |
1786 | * of the physical memory space for vmemmaps. That space is pageblock | |
1787 | * aligned. | |
1788 | */ | |
4986fac1 | 1789 | if (WARN_ON_ONCE(!nr_pages || |
dd8e2f23 OS |
1790 | !IS_ALIGNED(start_pfn, pageblock_nr_pages) || |
1791 | !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) | |
4986fac1 DH |
1792 | return -EINVAL; |
1793 | ||
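/*
 * Illustrative x86-64 numbers: with 4 KiB pages a section is 128 MiB
 * (PAGES_PER_SECTION == 32768) and a pageblock is 2 MiB
 * (pageblock_nr_pages == 512), so a plain offline request must cover
 * whole 128 MiB sections, while a memmap_on_memory block may start a
 * few 2 MiB pageblocks into the range.
 */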
381eab4a DH |
1794 | mem_hotplug_begin(); |
1795 | ||
c5e79ef5 DH |
1796 | /* |
1797 | * Don't allow offlining memory blocks that contain holes. |
1798 | * Consequently, memory blocks with holes can never get onlined | |
1799 | * via the hotplug path - online_pages() - as hotplugged memory has | |
1800 | * no holes. This way, we e.g., don't have to worry about marking | |
1801 | * memory holes PG_reserved, don't need pfn_valid() checks, and can | |
1802 | * avoid using walk_system_ram_range() later. | |
1803 | */ | |
73a11c96 | 1804 | walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, |
c5e79ef5 | 1805 | count_system_ram_pages_cb); |
73a11c96 | 1806 | if (system_ram_pages != nr_pages) { |
c5e79ef5 DH |
1807 | ret = -EINVAL; |
1808 | reason = "memory holes"; | |
1809 | goto failed_removal; | |
1810 | } | |
1811 | ||
0c0e6195 KH |
1812 | /* All pages must belong to a single zone; this assumption makes |
1813 | hotplug much easier and the code more readable. */ |
92917998 DH |
1814 | zone = test_pages_in_a_zone(start_pfn, end_pfn); |
1815 | if (!zone) { | |
79605093 MH |
1816 | ret = -EINVAL; |
1817 | reason = "multizone range"; | |
1818 | goto failed_removal; | |
381eab4a | 1819 | } |
7b78d335 | 1820 | node = zone_to_nid(zone); |
7b78d335 | 1821 | |
ec6e8c7e VB |
1822 | /* |
1823 | * Disable pcplists so that page isolation cannot race with freeing | |
1824 | * in a way that pages from isolated pageblock are left on pcplists. | |
1825 | */ | |
1826 | zone_pcp_disable(zone); | |
d479960e | 1827 | lru_cache_disable(); |
ec6e8c7e | 1828 | |
0c0e6195 | 1829 | /* set above range as isolated */ |
b023f468 | 1830 | ret = start_isolate_page_range(start_pfn, end_pfn, |
d381c547 | 1831 | MIGRATE_MOVABLE, |
756d25be | 1832 | MEMORY_OFFLINE | REPORT_FAILURE); |
3fa0c7c7 | 1833 | if (ret) { |
79605093 | 1834 | reason = "failure to isolate range"; |
ec6e8c7e | 1835 | goto failed_removal_pcplists_disabled; |
381eab4a | 1836 | } |
7b78d335 YG |
1837 | |
1838 | arg.start_pfn = start_pfn; | |
1839 | arg.nr_pages = nr_pages; | |
d9713679 | 1840 | node_states_check_changes_offline(nr_pages, zone, &arg); |
7b78d335 YG |
1841 | |
1842 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); | |
1843 | ret = notifier_to_errno(ret); | |
79605093 MH |
1844 | if (ret) { |
1845 | reason = "notifier failure"; | |
1846 | goto failed_removal_isolated; | |
1847 | } | |
7b78d335 | 1848 | |
bb8965bd | 1849 | do { |
aa218795 DH |
1850 | pfn = start_pfn; |
1851 | do { | |
bb8965bd MH |
1852 | if (signal_pending(current)) { |
1853 | ret = -EINTR; | |
1854 | reason = "signal backoff"; | |
1855 | goto failed_removal_isolated; | |
1856 | } | |
72b39cfc | 1857 | |
bb8965bd | 1858 | cond_resched(); |
bb8965bd | 1859 | |
aa218795 DH |
1860 | ret = scan_movable_pages(pfn, end_pfn, &pfn); |
1861 | if (!ret) { | |
bb8965bd MH |
1862 | /* |
1863 | * TODO: fatal migration failures should bail | |
1864 | * out | |
1865 | */ | |
1866 | do_migrate_range(pfn, end_pfn); | |
1867 | } | |
aa218795 DH |
1868 | } while (!ret); |
1869 | ||
1870 | if (ret != -ENOENT) { | |
1871 | reason = "unmovable page"; | |
1872 | goto failed_removal_isolated; | |
bb8965bd | 1873 | } |
0c0e6195 | 1874 | |
bb8965bd MH |
1875 | /* |
1876 | * Dissolve free hugepages in the memory block before doing | |
1877 | * offlining actually in order to make hugetlbfs's object | |
1878 | * counting consistent. | |
1879 | */ | |
1880 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | |
1881 | if (ret) { | |
1882 | reason = "failure to dissolve huge pages"; | |
1883 | goto failed_removal_isolated; | |
1884 | } | |
0a1a9a00 | 1885 | |
0a1a9a00 | 1886 | ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); |
ec6e8c7e | 1887 | |
5557c766 | 1888 | } while (ret); |
72b39cfc | 1889 | |
0a1a9a00 DH |
1890 | /* Mark all sections offline and remove free pages from the buddy. */ |
1891 | __offline_isolated_pages(start_pfn, end_pfn); | |
7c33023a | 1892 | pr_debug("Offlined Pages %ld\n", nr_pages); |
0a1a9a00 | 1893 | |
9b7ea46a | 1894 | /* |
b30c5927 DH |
1895 | * The memory sections are marked offline, and the pageblock flags |
1896 | * are effectively stale; nobody should be touching them. Fix up the |
1897 | * number of isolated pageblocks; memory onlining will properly revert this. |
9b7ea46a QC |
1898 | */ |
1899 | spin_lock_irqsave(&zone->lock, flags); | |
ea15153c | 1900 | zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; |
9b7ea46a QC |
1901 | spin_unlock_irqrestore(&zone->lock, flags); |
1902 | ||
d479960e | 1903 | lru_cache_enable(); |
ec6e8c7e VB |
1904 | zone_pcp_enable(zone); |
1905 | ||
0c0e6195 | 1906 | /* removal success */ |
0a1a9a00 | 1907 | adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); |
4b097002 | 1908 | adjust_present_page_count(pfn_to_page(start_pfn), -nr_pages); |
7b78d335 | 1909 | |
b92ca18e | 1910 | /* reinitialise watermarks and update pcp limits */ |
1b79acc9 KM |
1911 | init_per_zone_wmark_min(); |
1912 | ||
1e8537ba | 1913 | if (!populated_zone(zone)) { |
340175b7 | 1914 | zone_pcp_reset(zone); |
72675e13 | 1915 | build_all_zonelists(NULL); |
b92ca18e | 1916 | } |
340175b7 | 1917 | |
d9713679 | 1918 | node_states_clear_node(node, &arg); |
698b1b30 | 1919 | if (arg.status_change_nid >= 0) { |
8fe23e05 | 1920 | kswapd_stop(node); |
698b1b30 VB |
1921 | kcompactd_stop(node); |
1922 | } | |
bce7394a | 1923 | |
0c0e6195 | 1924 | writeback_set_ratelimit(); |
7b78d335 YG |
1925 | |
1926 | memory_notify(MEM_OFFLINE, &arg); | |
feee6b29 | 1927 | remove_pfn_range_from_zone(zone, start_pfn, nr_pages); |
381eab4a | 1928 | mem_hotplug_done(); |
0c0e6195 KH |
1929 | return 0; |
1930 | ||
79605093 MH |
1931 | failed_removal_isolated: |
1932 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | |
c4efe484 | 1933 | memory_notify(MEM_CANCEL_OFFLINE, &arg); |
ec6e8c7e | 1934 | failed_removal_pcplists_disabled: |
946746d1 | 1935 | lru_cache_enable(); |
ec6e8c7e | 1936 | zone_pcp_enable(zone); |
0c0e6195 | 1937 | failed_removal: |
79605093 | 1938 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
e33e33b4 | 1939 | (unsigned long long) start_pfn << PAGE_SHIFT, |
79605093 MH |
1940 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
1941 | reason); | |
0c0e6195 | 1942 | /* pushback to free area */ |
381eab4a | 1943 | mem_hotplug_done(); |
0c0e6195 KH |
1944 | return ret; |
1945 | } | |
71088785 | 1946 | |
d6de9d53 | 1947 | static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) |
bbc76be6 WC |
1948 | { |
1949 | int ret = !is_memblock_offlined(mem); | |
e1c158e4 | 1950 | int *nid = arg; |
bbc76be6 | 1951 | |
e1c158e4 | 1952 | *nid = mem->nid; |
349daa0f RD |
1953 | if (unlikely(ret)) { |
1954 | phys_addr_t beginpa, endpa; | |
1955 | ||
1956 | beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); | |
b6c88d3b | 1957 | endpa = beginpa + memory_block_size_bytes() - 1; |
756a025f | 1958 | pr_warn("removing memory failed, because memory [%pa-%pa] is online\n", |
349daa0f | 1959 | &beginpa, &endpa); |
bbc76be6 | 1960 | |
eca499ab PT |
1961 | return -EBUSY; |
1962 | } | |
1963 | return 0; | |
bbc76be6 WC |
1964 | } |
1965 | ||
a08a2ae3 OS |
1966 | static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg) |
1967 | { | |
1968 | /* | |
1969 | * If nr_vmemmap_pages is 0 (not set), walk_memory_blocks() continues with the next block. |
1970 | */ | |
1971 | return mem->nr_vmemmap_pages; | |
1972 | } | |
1973 | ||
0f1cfe9d | 1974 | static int check_cpu_on_node(pg_data_t *pgdat) |
60a5a19e | 1975 | { |
60a5a19e TC |
1976 | int cpu; |
1977 | ||
1978 | for_each_present_cpu(cpu) { | |
1979 | if (cpu_to_node(cpu) == pgdat->node_id) | |
1980 | /* | |
1981 | * A CPU on this node hasn't been removed, so we can't |
1982 | * offline this node. | |
1983 | */ | |
1984 | return -EBUSY; | |
1985 | } | |
1986 | ||
1987 | return 0; | |
1988 | } | |
1989 | ||
2c91f8fc DH |
1990 | static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg) |
1991 | { | |
1992 | int nid = *(int *)arg; | |
1993 | ||
1994 | /* | |
1995 | * If a memory block belongs to multiple nodes, the stored nid is not | |
1996 | * reliable. However, such blocks are always online (i.e., they cannot get |
1997 | * offlined) and, therefore, are still spanned by the node. | |
1998 | */ | |
1999 | return mem->nid == nid ? -EEXIST : 0; | |
2000 | } | |
2001 | ||
0f1cfe9d TK |
2002 | /** |
2003 | * try_offline_node | |
e8b098fc | 2004 | * @nid: the node ID |
0f1cfe9d TK |
2005 | * |
2006 | * Offline a node if all memory sections and cpus of the node are removed. | |
2007 | * | |
2008 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
2009 | * and online/offline operations before this call. | |
2010 | */ | |
90b30cdc | 2011 | void try_offline_node(int nid) |
60a5a19e | 2012 | { |
d822b86a | 2013 | pg_data_t *pgdat = NODE_DATA(nid); |
2c91f8fc | 2014 | int rc; |
60a5a19e | 2015 | |
2c91f8fc DH |
2016 | /* |
2017 | * If the node still spans pages (especially ZONE_DEVICE), don't | |
2018 | * offline it. A node spans memory after move_pfn_range_to_zone(), | |
2019 | * e.g., after the memory block was onlined. | |
2020 | */ | |
2021 | if (pgdat->node_spanned_pages) | |
2022 | return; | |
60a5a19e | 2023 | |
2c91f8fc DH |
2024 | /* |
2025 | * Especially offline memory blocks might not be spanned by the | |
2026 | * node. They will get spanned by the node once they get onlined. | |
2027 | * However, they link to the node in sysfs and can get onlined later. | |
2028 | */ | |
2029 | rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb); | |
2030 | if (rc) | |
60a5a19e | 2031 | return; |
60a5a19e | 2032 | |
46a3679b | 2033 | if (check_cpu_on_node(pgdat)) |
60a5a19e TC |
2034 | return; |
2035 | ||
2036 | /* | |
2037 | * All memory and CPUs of this node have been removed; we can |
2038 | * offline the node now. |
2039 | */ | |
2040 | node_set_offline(nid); | |
2041 | unregister_one_node(nid); | |
2042 | } | |
90b30cdc | 2043 | EXPORT_SYMBOL(try_offline_node); |
60a5a19e | 2044 | |
e1c158e4 | 2045 | static int __ref try_remove_memory(u64 start, u64 size) |
bbc76be6 | 2046 | { |
a08a2ae3 OS |
2047 | struct vmem_altmap mhp_altmap = {}; |
2048 | struct vmem_altmap *altmap = NULL; | |
2049 | unsigned long nr_vmemmap_pages; | |
e1c158e4 | 2050 | int rc = 0, nid = NUMA_NO_NODE; |
993c1aad | 2051 | |
27356f54 TK |
2052 | BUG_ON(check_hotplug_memory_range(start, size)); |
2053 | ||
6677e3ea | 2054 | /* |
242831eb | 2055 | * All memory blocks must be offlined before removing memory. Check |
eca499ab | 2056 | * whether all memory blocks in question are offline and return error |
242831eb | 2057 | * if this is not the case. |
e1c158e4 DH |
2058 | * |
2059 | * While at it, determine the nid. Note that if we'd have mixed nodes, | |
2060 | * we'd only try to offline the last determined one -- which is good | |
2061 | * enough for the cases we care about. | |
6677e3ea | 2062 | */ |
e1c158e4 | 2063 | rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb); |
eca499ab | 2064 | if (rc) |
b4223a51 | 2065 | return rc; |
6677e3ea | 2066 | |
a08a2ae3 OS |
2067 | /* |
2068 | * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in | |
2069 | * the same granularity it was added - a single memory block. | |
2070 | */ | |
2071 | if (memmap_on_memory) { | |
2072 | nr_vmemmap_pages = walk_memory_blocks(start, size, NULL, | |
2073 | get_nr_vmemmap_pages_cb); | |
2074 | if (nr_vmemmap_pages) { | |
2075 | if (size != memory_block_size_bytes()) { | |
2076 | pr_warn("Refuse to remove %#llx - %#llx, " |
2077 | "wrong granularity\n", | |
2078 | start, start + size); | |
2079 | return -EINVAL; | |
2080 | } | |
2081 | ||
2082 | /* | |
2083 | * Let remove_pmd_table->free_hugepage_table do the | |
2084 | * right thing if we used vmem_altmap when hot-adding | |
2085 | * the range. | |
2086 | */ | |
2087 | mhp_altmap.alloc = nr_vmemmap_pages; | |
2088 | altmap = &mhp_altmap; | |
2089 | } | |
2090 | } | |
2091 | ||
46c66c4b YI |
2092 | /* remove memmap entry */ |
2093 | firmware_map_remove(start, start + size, "System RAM"); | |
4c4b7f9b | 2094 | |
f1037ec0 DW |
2095 | /* |
2096 | * Memory block device removal under the device_hotplug_lock is | |
2097 | * a barrier against racing online attempts. | |
2098 | */ | |
4c4b7f9b | 2099 | remove_memory_block_devices(start, size); |
46c66c4b | 2100 | |
f1037ec0 DW |
2101 | mem_hotplug_begin(); |
2102 | ||
65a2aa5f | 2103 | arch_remove_memory(start, size, altmap); |
52219aea DH |
2104 | |
2105 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { | |
2106 | memblock_free(start, size); | |
2107 | memblock_remove(start, size); | |
2108 | } | |
2109 | ||
cb8e3c8b | 2110 | release_mem_region_adjustable(start, size); |
24d335ca | 2111 | |
e1c158e4 DH |
2112 | if (nid != NUMA_NO_NODE) |
2113 | try_offline_node(nid); | |
60a5a19e | 2114 | |
bfc8c901 | 2115 | mem_hotplug_done(); |
b4223a51 | 2116 | return 0; |
71088785 | 2117 | } |
d15e5926 | 2118 | |
eca499ab | 2119 | /** |
5640c9ca | 2120 | * __remove_memory - Remove memory if every memory block is offline |
eca499ab PT |
2121 | * @start: physical address of the region to remove |
2122 | * @size: size of the region to remove | |
2123 | * | |
2124 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
2125 | * and online/offline operations before this call, as required by | |
2126 | * try_offline_node(). | |
2127 | */ | |
e1c158e4 | 2128 | void __remove_memory(u64 start, u64 size) |
eca499ab PT |
2129 | { |
2130 | ||
2131 | /* | |
29a90db9 | 2132 | * Trigger BUG() if some memory was not offlined prior to calling this |
eca499ab PT |
2133 | * function. |
2134 | */ | |
e1c158e4 | 2135 | if (try_remove_memory(start, size)) |
eca499ab PT |
2136 | BUG(); |
2137 | } | |
2138 | ||
2139 | /* | |
2140 | * Remove memory if every memory block is offline; otherwise, return -EBUSY if |
2141 | * some memory is not offline. |
2142 | */ | |
e1c158e4 | 2143 | int remove_memory(u64 start, u64 size) |
d15e5926 | 2144 | { |
eca499ab PT |
2145 | int rc; |
2146 | ||
d15e5926 | 2147 | lock_device_hotplug(); |
e1c158e4 | 2148 | rc = try_remove_memory(start, size); |
d15e5926 | 2149 | unlock_device_hotplug(); |
eca499ab PT |
2150 | |
2151 | return rc; | |
d15e5926 | 2152 | } |
71088785 | 2153 | EXPORT_SYMBOL_GPL(remove_memory); |
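/*
 * Illustrative pairing of the two removal variants (addresses are made
 * up): remove_memory() takes the device hotplug lock itself, while
 * __remove_memory() expects the caller to hold it:
 *
 *	rc = remove_memory(start, size);
 *
 *	lock_device_hotplug();
 *	__remove_memory(start, size);
 *	unlock_device_hotplug();
 */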
08b3acd7 | 2154 | |
8dc4bb58 DH |
2155 | static int try_offline_memory_block(struct memory_block *mem, void *arg) |
2156 | { | |
2157 | uint8_t online_type = MMOP_ONLINE_KERNEL; | |
2158 | uint8_t **online_types = arg; | |
2159 | struct page *page; | |
2160 | int rc; | |
2161 | ||
2162 | /* | |
2163 | * Sense the online_type via the zone of the memory block. Offlining | |
2164 | * with multiple zones within one memory block will be rejected | |
2165 | * by offlining code ... so we don't care about that. | |
2166 | */ | |
2167 | page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr)); | |
2168 | if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE) | |
2169 | online_type = MMOP_ONLINE_MOVABLE; | |
2170 | ||
2171 | rc = device_offline(&mem->dev); | |
2172 | /* | |
2173 | * Default is MMOP_OFFLINE - change it only if offlining succeeded, | |
2174 | * so try_reonline_memory_block() can do the right thing. | |
2175 | */ | |
2176 | if (!rc) | |
2177 | **online_types = online_type; | |
2178 | ||
2179 | (*online_types)++; | |
2180 | /* Ignore if already offline. */ | |
2181 | return rc < 0 ? rc : 0; | |
2182 | } | |
2183 | ||
2184 | static int try_reonline_memory_block(struct memory_block *mem, void *arg) | |
2185 | { | |
2186 | uint8_t **online_types = arg; | |
2187 | int rc; | |
2188 | ||
2189 | if (**online_types != MMOP_OFFLINE) { | |
2190 | mem->online_type = **online_types; | |
2191 | rc = device_online(&mem->dev); | |
2192 | if (rc < 0) | |
2193 | pr_warn("%s: Failed to re-online memory: %d", | |
2194 | __func__, rc); | |
2195 | } | |
2196 | ||
2197 | /* Continue processing all remaining memory blocks. */ | |
2198 | (*online_types)++; | |
2199 | return 0; | |
2200 | } | |
2201 | ||
08b3acd7 | 2202 | /* |
8dc4bb58 DH |
2203 | * Try to offline and remove memory. Might take a long time to finish in case |
2204 | * memory is still in use. Primarily useful for memory devices that logically | |
2205 | * unplugged all memory (so it's no longer in use) and want to offline + remove | |
2206 | * that memory. | |
08b3acd7 | 2207 | */ |
e1c158e4 | 2208 | int offline_and_remove_memory(u64 start, u64 size) |
08b3acd7 | 2209 | { |
8dc4bb58 DH |
2210 | const unsigned long mb_count = size / memory_block_size_bytes(); |
2211 | uint8_t *online_types, *tmp; | |
2212 | int rc; | |
08b3acd7 DH |
2213 | |
2214 | if (!IS_ALIGNED(start, memory_block_size_bytes()) || | |
8dc4bb58 DH |
2215 | !IS_ALIGNED(size, memory_block_size_bytes()) || !size) |
2216 | return -EINVAL; | |
2217 | ||
2218 | /* | |
2219 | * We'll remember the old online type of each memory block, so we can | |
2220 | * try to revert whatever we did if offlining one memory block fails |
2221 | * after some others have already been offlined successfully. |
2222 | */ | |
2223 | online_types = kmalloc_array(mb_count, sizeof(*online_types), | |
2224 | GFP_KERNEL); | |
2225 | if (!online_types) | |
2226 | return -ENOMEM; | |
2227 | /* | |
2228 | * Initialize all states to MMOP_OFFLINE, so when we abort processing in | |
2229 | * try_offline_memory_block(), we'll skip all unprocessed blocks in | |
2230 | * try_reonline_memory_block(). | |
2231 | */ | |
2232 | memset(online_types, MMOP_OFFLINE, mb_count); | |
08b3acd7 DH |
2233 | |
2234 | lock_device_hotplug(); | |
8dc4bb58 DH |
2235 | |
2236 | tmp = online_types; | |
2237 | rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block); | |
08b3acd7 DH |
2238 | |
2239 | /* | |
8dc4bb58 | 2240 | * If we succeeded in offlining all memory, remove it. |
08b3acd7 DH |
2241 | * This cannot fail as it cannot get onlined in the meantime. |
2242 | */ | |
2243 | if (!rc) { | |
e1c158e4 | 2244 | rc = try_remove_memory(start, size); |
8dc4bb58 DH |
2245 | if (rc) |
2246 | pr_err("%s: Failed to remove memory: %d", __func__, rc); | |
2247 | } | |
2248 | ||
2249 | /* | |
2250 | * Rollback what we did. While memory onlining might theoretically fail | |
2251 | * (nacked by a notifier), it barely ever happens. | |
2252 | */ | |
2253 | if (rc) { | |
2254 | tmp = online_types; | |
2255 | walk_memory_blocks(start, size, &tmp, | |
2256 | try_reonline_memory_block); | |
08b3acd7 DH |
2257 | } |
2258 | unlock_device_hotplug(); | |
2259 | ||
8dc4bb58 | 2260 | kfree(online_types); |
08b3acd7 DH |
2261 | return rc; |
2262 | } | |
2263 | EXPORT_SYMBOL_GPL(offline_and_remove_memory); | |
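/*
 * Illustrative caller (loosely modeled on virtio-mem): after the device
 * logically unplugged one memory block, offline and remove it:
 *
 *	rc = offline_and_remove_memory(block_addr,
 *				       memory_block_size_bytes());
 */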
aba6efc4 | 2264 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |