// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"

enum {
        MEMMAP_ON_MEMORY_DISABLE = 0,
        MEMMAP_ON_MEMORY_ENABLE,
        MEMMAP_ON_MEMORY_FORCE,
};

static int memmap_mode __read_mostly = MEMMAP_ON_MEMORY_DISABLE;

static inline unsigned long memory_block_memmap_size(void)
{
        return PHYS_PFN(memory_block_size_bytes()) * sizeof(struct page);
}

static inline unsigned long memory_block_memmap_on_memory_pages(void)
{
        unsigned long nr_pages = PFN_UP(memory_block_memmap_size());

        /*
         * In "forced" memmap_on_memory mode, we add extra pages to align the
         * vmemmap size to cover full pageblocks. That way, we can add memory
         * even if the vmemmap size is not properly aligned; however, we might
         * waste memory.
         */
        if (memmap_mode == MEMMAP_ON_MEMORY_FORCE)
                return pageblock_align(nr_pages);
        return nr_pages;
}

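/*
 * Worked example (illustrative, not part of the original source): with a
 * 128 MiB memory block, 4 KiB pages and a 64-byte struct page,
 * memory_block_memmap_size() is 32768 * 64 bytes = 2 MiB, so PFN_UP() yields
 * 512 vmemmap pages. With 2 MiB pageblocks (512 pages) that is already
 * pageblock-aligned, and "force" mode would waste nothing in this case.
 */
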
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
/*
 * memory_hotplug.memmap_on_memory parameter
 */
static int set_memmap_mode(const char *val, const struct kernel_param *kp)
{
        int ret, mode;
        bool enabled;

        if (sysfs_streq(val, "force") || sysfs_streq(val, "FORCE")) {
                mode = MEMMAP_ON_MEMORY_FORCE;
        } else {
                ret = kstrtobool(val, &enabled);
                if (ret < 0)
                        return ret;
                if (enabled)
                        mode = MEMMAP_ON_MEMORY_ENABLE;
                else
                        mode = MEMMAP_ON_MEMORY_DISABLE;
        }
        *((int *)kp->arg) = mode;
        if (mode == MEMMAP_ON_MEMORY_FORCE) {
                unsigned long memmap_pages = memory_block_memmap_on_memory_pages();

                pr_info_once("Memory hotplug will waste %ld pages in each memory block\n",
                             memmap_pages - PFN_UP(memory_block_memmap_size()));
        }
        return 0;
}

static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
        int mode = *((int *)kp->arg);

        if (mode == MEMMAP_ON_MEMORY_FORCE)
                return sprintf(buffer, "force\n");
        return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}

static const struct kernel_param_ops memmap_mode_ops = {
        .set = set_memmap_mode,
        .get = get_memmap_mode,
};
module_param_cb(memmap_on_memory, &memmap_mode_ops, &memmap_mode, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug\n"
                 "With value \"force\" it could result in memory wastage due "
                 "to memmap size limitations (Y/N/force)");

static inline bool mhp_memmap_on_memory(void)
{
        return memmap_mode != MEMMAP_ON_MEMORY_DISABLE;
}
#else
static inline bool mhp_memmap_on_memory(void)
{
        return false;
}
#endif

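/*
 * Usage example (illustrative): the mode is typically chosen on the kernel
 * command line, e.g.
 *
 *      memory_hotplug.memmap_on_memory=Y
 *      memory_hotplug.memmap_on_memory=force
 *
 * and, being registered with mode 0444, can be read back from
 * /sys/module/memory_hotplug/parameters/memmap_on_memory.
 */
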
enum {
        ONLINE_POLICY_CONTIG_ZONES = 0,
        ONLINE_POLICY_AUTO_MOVABLE,
};

static const char * const online_policy_to_str[] = {
        [ONLINE_POLICY_CONTIG_ZONES] = "contig-zones",
        [ONLINE_POLICY_AUTO_MOVABLE] = "auto-movable",
};

static int set_online_policy(const char *val, const struct kernel_param *kp)
{
        int ret = sysfs_match_string(online_policy_to_str, val);

        if (ret < 0)
                return ret;
        *((int *)kp->arg) = ret;
        return 0;
}

static int get_online_policy(char *buffer, const struct kernel_param *kp)
{
        return sprintf(buffer, "%s\n", online_policy_to_str[*((int *)kp->arg)]);
}

/*
 * memory_hotplug.online_policy: configure online behavior when onlining without
 * specifying a zone (MMOP_ONLINE)
 *
 * "contig-zones": keep zone contiguous
 * "auto-movable": online memory to ZONE_MOVABLE if the configuration
 *                 (auto_movable_ratio, auto_movable_numa_aware) allows for it
 */
static int online_policy __read_mostly = ONLINE_POLICY_CONTIG_ZONES;
static const struct kernel_param_ops online_policy_ops = {
        .set = set_online_policy,
        .get = get_online_policy,
};
module_param_cb(online_policy, &online_policy_ops, &online_policy, 0644);
MODULE_PARM_DESC(online_policy,
                "Set the online policy (\"contig-zones\", \"auto-movable\") "
                "Default: \"contig-zones\"");

/*
 * memory_hotplug.auto_movable_ratio: specify maximum MOVABLE:KERNEL ratio
 *
 * The ratio represents an upper limit and the kernel might decide to not
 * online some memory to ZONE_MOVABLE -- e.g., because hotplugged KERNEL memory
 * doesn't allow for more MOVABLE memory.
 */
static unsigned int auto_movable_ratio __read_mostly = 301;
module_param(auto_movable_ratio, uint, 0644);
MODULE_PARM_DESC(auto_movable_ratio,
                "Set the maximum ratio of MOVABLE:KERNEL memory in the system "
                "in percent for \"auto-movable\" online policy. Default: 301");

/*
 * memory_hotplug.auto_movable_numa_aware: consider numa node stats
 */
#ifdef CONFIG_NUMA
static bool auto_movable_numa_aware __read_mostly = true;
module_param(auto_movable_numa_aware, bool, 0644);
MODULE_PARM_DESC(auto_movable_numa_aware,
                "Consider numa node stats in addition to global stats in "
                "\"auto-movable\" online policy. Default: true");
#endif /* CONFIG_NUMA */

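/*
 * Worked example (illustrative): with the default auto_movable_ratio of 301,
 * a system that booted with 4 GiB of KERNEL memory may hold up to about
 * 4 GiB * 301 / 100 ~= 12 GiB of MOVABLE memory, i.e. hotplugging another
 * 12 GiB can still go entirely to ZONE_MOVABLE.
 */
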
/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
        percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
        percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

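/*
 * Usage sketch (illustrative): readers pin the current online state against
 * concurrent hotplug instead of taking the write side, e.g.
 *
 *      get_online_mems();
 *      page = pfn_to_online_page(pfn);
 *      if (page)
 *              ... inspect the online page ...
 *      put_online_mems();
 */
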
static int __init setup_memhp_default_state(char *str)
{
        const int online_type = mhp_online_type_from_str(str);

        if (online_type >= 0)
                mhp_default_online_type = online_type;

        return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

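/*
 * Usage example (illustrative): booting with "memhp_default_state=online"
 * lets newly added memory blocks come up online automatically, while
 * "memhp_default_state=offline" keeps them offline until user space (e.g.
 * a udev rule) onlines them explicitly.
 */
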
void mem_hotplug_begin(void)
{
        cpus_read_lock();
        percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
        percpu_up_write(&mem_hotplug_lock);
        cpus_read_unlock();
}

u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
                                                 const char *resource_name)
{
        struct resource *res;
        unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        if (strcmp(resource_name, "System RAM"))
                flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

        if (!mhp_range_allowed(start, size, true))
                return ERR_PTR(-E2BIG);

        /*
         * Make sure the value parsed from 'mem=' only restricts memory adding
         * while booting, so that memory hotplug won't be impacted. Please
         * refer to the documentation of 'mem=' in kernel-parameters.txt for
         * more details.
         */
        if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
                return ERR_PTR(-E2BIG);

        /*
         * Request ownership of the new memory range. This might be
         * a child of an existing resource that was present but
         * not marked as busy.
         */
        res = __request_region(&iomem_resource, start, size,
                               resource_name, flags);

        if (!res) {
                pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
                                start, start + size);
                return ERR_PTR(-EEXIST);
        }
        return res;
}

static void release_memory_resource(struct resource *res)
{
        if (!res)
                return;
        release_resource(res);
        kfree(res);
}

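/*
 * Example (illustrative, exact names depend on the caller): a successfully
 * registered range shows up in /proc/iomem, e.g. for dax_kmem-added memory:
 *
 *      240000000-33fffffff : System RAM (kmem)
 *
 * Any resource_name other than "System RAM" is flagged as driver managed,
 * so kexec and friends do not treat it like ordinary boot memory.
 */
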
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
{
        /*
         * Disallow all operations smaller than a sub-section and only
         * allow operations smaller than a section for
         * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
         * enforces a larger memory_block_size_bytes() granularity for
         * memory that will be marked online, so this check should only
         * fire for direct arch_{add,remove}_memory() users outside of
         * add_memory_resource().
         */
        unsigned long min_align;

        if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
                min_align = PAGES_PER_SUBSECTION;
        else
                min_align = PAGES_PER_SECTION;
        if (!IS_ALIGNED(pfn | nr_pages, min_align))
                return -EINVAL;
        return 0;
}

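/*
 * Note on the alignment trick above (illustrative numbers): OR-ing pfn and
 * nr_pages checks both in one go, since a low bit set in either operand
 * survives the OR. E.g. with min_align = 512 (0x200):
 *
 *      pfn = 0x10000, nr_pages = 0x200  ->  0x10200 & 0x1ff == 0, accepted
 *      pfn = 0x10000, nr_pages = 0x1ff  ->  0x101ff & 0x1ff != 0, rejected
 */
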
/*
 * Return the page for the valid pfn only if the page is online. All pfn
 * walkers which rely on fully initialized page->flags (and others) should
 * use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
        unsigned long nr = pfn_to_section_nr(pfn);
        struct dev_pagemap *pgmap;
        struct mem_section *ms;

        if (nr >= NR_MEM_SECTIONS)
                return NULL;

        ms = __nr_to_section(nr);
        if (!online_section(ms))
                return NULL;

        /*
         * Save some code text when online_section() +
         * pfn_section_valid() are sufficient.
         */
        if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
                return NULL;

        if (!pfn_section_valid(ms, pfn))
                return NULL;

        if (!online_device_section(ms))
                return pfn_to_page(pfn);

        /*
         * Slowpath: when ZONE_DEVICE collides with
         * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
         * the section may be 'offline' but 'valid'. Only
         * get_dev_pagemap() can determine sub-section online status.
         */
        pgmap = get_dev_pagemap(pfn, NULL);
        put_dev_pagemap(pgmap);

        /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
        if (pgmap)
                return NULL;

        return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);

int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
                struct mhp_params *params)
{
        const unsigned long end_pfn = pfn + nr_pages;
        unsigned long cur_nr_pages;
        int err;
        struct vmem_altmap *altmap = params->altmap;

        if (WARN_ON_ONCE(!pgprot_val(params->pgprot)))
                return -EINVAL;

        VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

        if (altmap) {
                /*
                 * Validate altmap is within bounds of the total request
                 */
                if (altmap->base_pfn != pfn
                                || vmem_altmap_offset(altmap) > nr_pages) {
                        pr_warn_once("memory add fail, invalid altmap\n");
                        return -EINVAL;
                }
                altmap->alloc = 0;
        }

        if (check_pfn_span(pfn, nr_pages)) {
                WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
                return -EINVAL;
        }

        for (; pfn < end_pfn; pfn += cur_nr_pages) {
                /* Select all remaining pages up to the next section boundary */
                cur_nr_pages = min(end_pfn - pfn,
                                   SECTION_ALIGN_UP(pfn + 1) - pfn);
                err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
                                         params->pgmap);
                if (err)
                        break;
                cond_resched();
        }
        vmemmap_populate_print_last();
        return err;
}

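/*
 * Loop sketch (illustrative): with 128 MiB sections and a request that
 * starts 16 MiB into a section, the first iteration adds the remaining
 * 112 MiB up to the section boundary (SECTION_ALIGN_UP(pfn + 1) - pfn),
 * and every following iteration covers one full section.
 */
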
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                                     unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
                if (unlikely(!pfn_to_online_page(start_pfn)))
                        continue;

                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;

                if (zone != page_zone(pfn_to_page(start_pfn)))
                        continue;

                return start_pfn;
        }

        return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                                    unsigned long start_pfn,
                                    unsigned long end_pfn)
{
        unsigned long pfn;

        /* pfn is the end pfn of a memory section. */
        pfn = end_pfn - 1;
        for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
                if (unlikely(!pfn_to_online_page(pfn)))
                        continue;

                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;

                if (zone != page_zone(pfn_to_page(pfn)))
                        continue;

                return pfn;
        }

        return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
{
        unsigned long pfn;
        int nid = zone_to_nid(zone);

        if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the zone, it
                 * needs to shrink zone->zone_start_pfn and
                 * zone->spanned_pages. In this case, we find the second
                 * smallest valid mem_section for shrinking the zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                                zone_end_pfn(zone));
                if (pfn) {
                        zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
                } else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;
                }
        } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is the biggest section in the zone, it
                 * needs to shrink zone->spanned_pages. In this case, we find
                 * the second biggest valid mem_section for shrinking the
                 * zone.
                 */
                pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
                        zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
                else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;
                }
        }
}

static void update_pgdat_span(struct pglist_data *pgdat)
{
        unsigned long node_start_pfn = 0, node_end_pfn = 0;
        struct zone *zone;

        for (zone = pgdat->node_zones;
             zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
                unsigned long end_pfn = zone_end_pfn(zone);

                /* No need to lock the zones, they can't change. */
                if (!zone->spanned_pages)
                        continue;
                if (!node_end_pfn) {
                        node_start_pfn = zone->zone_start_pfn;
                        node_end_pfn = end_pfn;
                        continue;
                }

                if (end_pfn > node_end_pfn)
                        node_end_pfn = end_pfn;
                if (zone->zone_start_pfn < node_start_pfn)
                        node_start_pfn = zone->zone_start_pfn;
        }

        pgdat->node_start_pfn = node_start_pfn;
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
                                      unsigned long start_pfn,
                                      unsigned long nr_pages)
{
        const unsigned long end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long pfn, cur_nr_pages;

        /* Poison struct pages because they are now uninitialized again. */
        for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
                cond_resched();

                /* Select all remaining pages up to the next section boundary */
                cur_nr_pages =
                        min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
                page_init_poison(pfn_to_page(pfn),
                                 sizeof(struct page) * cur_nr_pages);
        }

        /*
         * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
         * we will not try to shrink the zones - which is okay as
         * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
         */
        if (zone_is_zone_device(zone))
                return;

        clear_zone_contiguous(zone);

        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        update_pgdat_span(pgdat);

        set_zone_contiguous(zone);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
                    struct vmem_altmap *altmap)
{
        const unsigned long end_pfn = pfn + nr_pages;
        unsigned long cur_nr_pages;

        if (check_pfn_span(pfn, nr_pages)) {
                WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
                return;
        }

        for (; pfn < end_pfn; pfn += cur_nr_pages) {
                cond_resched();
                /* Select all remaining pages up to the next section boundary */
                cur_nr_pages = min(end_pfn - pfn,
                                   SECTION_ALIGN_UP(pfn + 1) - pfn);
                sparse_remove_section(pfn, cur_nr_pages, altmap);
        }
}

int set_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == generic_online_page) {
                online_page_callback = callback;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
        int rc = -EINVAL;

        get_online_mems();
        mutex_lock(&online_page_callback_lock);

        if (online_page_callback == callback) {
                online_page_callback = generic_online_page;
                rc = 0;
        }

        mutex_unlock(&online_page_callback_lock);
        put_online_mems();

        return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

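/*
 * Usage sketch (illustrative, modeled on the hv_balloon driver): a
 * ballooning driver can intercept onlining of hot-added pages:
 *
 *      rc = set_online_page_callback(&hv_online_page);
 *      ...
 *      restore_online_page_callback(&hv_online_page);
 *
 * set_online_page_callback() returns -EINVAL if a non-generic callback is
 * already registered, so only one driver can intercept at a time.
 */
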
void generic_online_page(struct page *page, unsigned int order)
{
        /*
         * Freeing the page with debug_pagealloc enabled will try to unmap it,
         * so we should map it first. This is better than introducing a special
         * case in page freeing fast path.
         */
        debug_pagealloc_map_pages(page, 1 << order);
        __free_pages_core(page, order);
        totalram_pages_add(1UL << order);
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
        const unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long pfn;

        /*
         * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
         * decide to not expose all pages to the buddy (e.g., expose them
         * later). We account all pages as being online and belonging to this
         * zone ("present").
         * When using memmap_on_memory, the range might not be aligned to
         * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
         * this and the first chunk to online will be pageblock_nr_pages.
         */
        for (pfn = start_pfn; pfn < end_pfn;) {
                int order;

                /*
                 * Free to online pages in the largest chunks alignment allows.
                 *
                 * __ffs() behaviour is undefined for 0. start == 0 is
                 * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER in
                 * that case.
                 */
                if (pfn)
                        order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
                else
                        order = MAX_PAGE_ORDER;

                (*online_page_callback)(pfn_to_page(pfn), order);
                pfn += (1UL << order);
        }

        /* mark all involved sections as online */
        online_mem_sections(start_pfn, end_pfn);
}

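/*
 * Worked example (illustrative): onlining a pageblock-aligned range starting
 * at pfn 0x10200 with MAX_PAGE_ORDER == 10: __ffs(0x10200) == 9, so the first
 * chunk is a single 512-page pageblock; the pfn then becomes 0x10400, where
 * __ffs() >= 10, and the loop proceeds in 1024-page chunks.
 */
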
/* check which state of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
        struct zone *zone, struct memory_notify *arg)
{
        int nid = zone_to_nid(zone);

        arg->status_change_nid = NUMA_NO_NODE;
        arg->status_change_nid_normal = NUMA_NO_NODE;

        if (!node_state(nid, N_MEMORY))
                arg->status_change_nid = nid;
        if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
                arg->status_change_nid_normal = nid;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
        if (arg->status_change_nid_normal >= 0)
                node_set_state(node, N_NORMAL_MEMORY);

        if (arg->status_change_nid >= 0)
                node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
                unsigned long nr_pages)
{
        unsigned long old_end_pfn = zone_end_pfn(zone);

        if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
                zone->zone_start_pfn = start_pfn;

        zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
                unsigned long nr_pages)
{
        unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

        if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
                pgdat->node_start_pfn = start_pfn;

        pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

#ifdef CONFIG_ZONE_DEVICE
static void section_taint_zone_device(unsigned long pfn)
{
        struct mem_section *ms = __pfn_to_section(pfn);

        ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}
#else
static inline void section_taint_zone_device(unsigned long pfn)
{
}
#endif

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                                  unsigned long nr_pages,
                                  struct vmem_altmap *altmap, int migratetype)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;

        clear_zone_contiguous(zone);

        if (zone_is_empty(zone))
                init_currently_empty_zone(zone, start_pfn, nr_pages);
        resize_zone_range(zone, start_pfn, nr_pages);
        resize_pgdat_range(pgdat, start_pfn, nr_pages);

        /*
         * Subsection population requires care in pfn_to_online_page().
         * Set the taint to enable the slow path detection of
         * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
         * section.
         */
        if (zone_is_zone_device(zone)) {
                if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
                        section_taint_zone_device(start_pfn);
                if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
                        section_taint_zone_device(start_pfn + nr_pages);
        }

        /*
         * TODO now we have a visible range of pages which are not associated
         * with their zone properly. Not nice but set_pfnblock_flags_mask
         * expects the zone to span the pfn range. All the pages in the range
         * are reserved so nobody should be touching them so we should be safe
         */
        memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
                          MEMINIT_HOTPLUG, altmap, migratetype);

        set_zone_contiguous(zone);
}

struct auto_movable_stats {
        unsigned long kernel_early_pages;
        unsigned long movable_pages;
};

static void auto_movable_stats_account_zone(struct auto_movable_stats *stats,
                                            struct zone *zone)
{
        if (zone_idx(zone) == ZONE_MOVABLE) {
                stats->movable_pages += zone->present_pages;
        } else {
                stats->kernel_early_pages += zone->present_early_pages;
#ifdef CONFIG_CMA
                /*
                 * CMA pages (never on hotplugged memory) behave like
                 * ZONE_MOVABLE.
                 */
                stats->movable_pages += zone->cma_pages;
                stats->kernel_early_pages -= zone->cma_pages;
#endif /* CONFIG_CMA */
        }
}

struct auto_movable_group_stats {
        unsigned long movable_pages;
        unsigned long req_kernel_early_pages;
};

static int auto_movable_stats_account_group(struct memory_group *group,
                                            void *arg)
{
        const int ratio = READ_ONCE(auto_movable_ratio);
        struct auto_movable_group_stats *stats = arg;
        long pages;

        /*
         * We don't support modifying the config while the auto-movable online
         * policy is already enabled. Just avoid the division by zero below.
         */
        if (!ratio)
                return 0;

        /*
         * Calculate how many early kernel pages this group requires to
         * satisfy the configured zone ratio.
         */
        pages = group->present_movable_pages * 100 / ratio;
        pages -= group->present_kernel_pages;

        if (pages > 0)
                stats->req_kernel_early_pages += pages;
        stats->movable_pages += group->present_movable_pages;
        return 0;
}

static bool auto_movable_can_online_movable(int nid, struct memory_group *group,
                                            unsigned long nr_pages)
{
        unsigned long kernel_early_pages, movable_pages;
        struct auto_movable_group_stats group_stats = {};
        struct auto_movable_stats stats = {};
        pg_data_t *pgdat = NODE_DATA(nid);
        struct zone *zone;
        int i;

        /* Walk all relevant zones and collect MOVABLE vs. KERNEL stats. */
        if (nid == NUMA_NO_NODE) {
                /* TODO: cache values */
                for_each_populated_zone(zone)
                        auto_movable_stats_account_zone(&stats, zone);
        } else {
                for (i = 0; i < MAX_NR_ZONES; i++) {
                        zone = pgdat->node_zones + i;
                        if (populated_zone(zone))
                                auto_movable_stats_account_zone(&stats, zone);
                }
        }

        kernel_early_pages = stats.kernel_early_pages;
        movable_pages = stats.movable_pages;

        /*
         * Kernel memory inside a dynamic memory group allows for more
         * MOVABLE memory within the same group. Remove the effect of all
         * but the current group from the stats.
         */
        walk_dynamic_memory_groups(nid, auto_movable_stats_account_group,
                                   group, &group_stats);
        if (kernel_early_pages <= group_stats.req_kernel_early_pages)
                return false;
        kernel_early_pages -= group_stats.req_kernel_early_pages;
        movable_pages -= group_stats.movable_pages;

        if (group && group->is_dynamic)
                kernel_early_pages += group->present_kernel_pages;

        /*
         * Test if we could online the given number of pages to ZONE_MOVABLE
         * and still stay in the configured ratio.
         */
        movable_pages += nr_pages;
        return movable_pages <= (auto_movable_ratio * kernel_early_pages) / 100;
}

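/*
 * Worked example (illustrative): auto_movable_ratio = 301, 1000000 early
 * kernel pages, 2500000 MOVABLE pages and no dynamic groups. Onlining
 * another 400000 pages passes (2900000 <= 3010000), while 600000 pages
 * would exceed the limit and make the caller fall back to a kernel zone.
 */
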
/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
                unsigned long nr_pages)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        int zid;

        for (zid = 0; zid < ZONE_NORMAL; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (zone_intersects(zone, start_pfn, nr_pages))
                        return zone;
        }

        return &pgdat->node_zones[ZONE_NORMAL];
}

/*
 * Determine to which zone to online memory dynamically based on user
 * configuration and system stats. We care about the following ratio:
 *
 *   MOVABLE : KERNEL
 *
 * Whereby MOVABLE is memory in ZONE_MOVABLE and KERNEL is memory in
 * one of the kernel zones. CMA pages inside one of the kernel zones really
 * behave like ZONE_MOVABLE, so we treat them accordingly.
 *
 * We don't allow for hotplugged memory in a KERNEL zone to increase the
 * amount of MOVABLE memory we can have, so we end up with:
 *
 *   MOVABLE : KERNEL_EARLY
 *
 * Whereby KERNEL_EARLY is memory in one of the kernel zones, available since
 * boot. We base our calculation on KERNEL_EARLY internally, because:
 *
 * a) Hotplugged memory in one of the kernel zones can sometimes still get
 *    hotunplugged, especially when hot(un)plugging individual memory blocks.
 *    There is no coordination across memory devices, therefore "automatic"
 *    hotunplugging, as implemented in hypervisors, could result in zone
 *    imbalances.
 * b) Early/boot memory in one of the kernel zones can usually not get
 *    hotunplugged again (e.g., no firmware interface to unplug, fragmented
 *    with unmovable allocations). While there are corner cases where it might
 *    still work, it is barely relevant in practice.
 *
 * Exceptions are dynamic memory groups, which allow for more MOVABLE
 * memory within the same memory group -- because in that case, there is
 * coordination within the single memory device managed by a single driver.
 *
 * We rely on "present pages" instead of "managed pages", as the latter is
 * highly unreliable and dynamic in virtualized environments, and does not
 * consider boot time allocations. For example, memory ballooning adjusts the
 * managed pages when inflating/deflating the balloon, and balloon compaction
 * can even migrate inflated pages between zones.
 *
 * Using "present pages" is better but some things to keep in mind are:
 *
 * a) Some memblock allocations, such as for the crashkernel area, are
 *    effectively unused by the kernel, yet they account to "present pages".
 *    Fortunately, these allocations are comparatively small in relevant setups
 *    (e.g., fraction of system memory).
 * b) Some hotplugged memory blocks in virtualized environments, especially
 *    hotplugged by virtio-mem, look like they are completely present, however,
 *    only parts of the memory block are actually currently usable.
 *    "present pages" is an upper limit that can get reached at runtime. As
 *    we base our calculations on KERNEL_EARLY, this is not an issue.
 */
static struct zone *auto_movable_zone_for_pfn(int nid,
                                              struct memory_group *group,
                                              unsigned long pfn,
                                              unsigned long nr_pages)
{
        unsigned long online_pages = 0, max_pages, end_pfn;
        struct page *page;

        if (!auto_movable_ratio)
                goto kernel_zone;

        if (group && !group->is_dynamic) {
                max_pages = group->s.max_pages;
                online_pages = group->present_movable_pages;

                /* If anything is !MOVABLE online the rest !MOVABLE. */
                if (group->present_kernel_pages)
                        goto kernel_zone;
        } else if (!group || group->d.unit_pages == nr_pages) {
                max_pages = nr_pages;
        } else {
                max_pages = group->d.unit_pages;
                /*
                 * Take a look at all online sections in the current unit.
                 * We can safely assume that all pages within a section belong
                 * to the same zone, because dynamic memory groups only deal
                 * with hotplugged memory.
                 */
                pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
                end_pfn = pfn + group->d.unit_pages;
                for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                        page = pfn_to_online_page(pfn);
                        if (!page)
                                continue;
                        /* If anything is !MOVABLE online the rest !MOVABLE. */
                        if (!is_zone_movable_page(page))
                                goto kernel_zone;
                        online_pages += PAGES_PER_SECTION;
                }
        }

        /*
         * Online MOVABLE if we could *currently* online all remaining parts
         * MOVABLE. We expect to (add+) online them immediately next, so if
         * nobody interferes, all will be MOVABLE if possible.
         */
        nr_pages = max_pages - online_pages;
        if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages))
                goto kernel_zone;

#ifdef CONFIG_NUMA
        if (auto_movable_numa_aware &&
            !auto_movable_can_online_movable(nid, group, nr_pages))
                goto kernel_zone;
#endif /* CONFIG_NUMA */

        return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
kernel_zone:
        return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
                unsigned long nr_pages)
{
        struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
                        nr_pages);
        struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
        bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
        bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

        /*
         * We inherit the existing zone in a simple case where zones do not
         * overlap in the given range
         */
        if (in_kernel ^ in_movable)
                return (in_kernel) ? kernel_zone : movable_zone;

        /*
         * If the range doesn't belong to any zone or two zones overlap in the
         * given range then we use movable zone only if movable_node is
         * enabled because we always online to a kernel zone by default.
         */
        return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid,
                struct memory_group *group, unsigned long start_pfn,
                unsigned long nr_pages)
{
        if (online_type == MMOP_ONLINE_KERNEL)
                return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

        if (online_type == MMOP_ONLINE_MOVABLE)
                return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

        if (online_policy == ONLINE_POLICY_AUTO_MOVABLE)
                return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages);

        return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

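/*
 * Usage example (illustrative): user space selects the online_type via the
 * memory block's sysfs state file, e.g.
 *
 *      echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * maps to MMOP_ONLINE_MOVABLE above, while a plain "online" (MMOP_ONLINE)
 * leaves the zone choice to the configured online_policy.
 */
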
/*
 * This function should only be called by memory_block_{online,offline},
 * and {online,offline}_pages.
 */
void adjust_present_page_count(struct page *page, struct memory_group *group,
                               long nr_pages)
{
        struct zone *zone = page_zone(page);
        const bool movable = zone_idx(zone) == ZONE_MOVABLE;

        /*
         * We only support onlining/offlining/adding/removing of complete
         * memory blocks; therefore, the whole range is either early or
         * hotplugged.
         */
        if (early_section(__pfn_to_section(page_to_pfn(page))))
                zone->present_early_pages += nr_pages;
        zone->present_pages += nr_pages;
        zone->zone_pgdat->node_present_pages += nr_pages;

        if (group && movable)
                group->present_movable_pages += nr_pages;
        else if (group && !movable)
                group->present_kernel_pages += nr_pages;
}

int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
                              struct zone *zone)
{
        unsigned long end_pfn = pfn + nr_pages;
        int ret, i;

        ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
        if (ret)
                return ret;

        move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);

        for (i = 0; i < nr_pages; i++)
                SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));

        /*
         * It might be that the vmemmap_pages fully span sections. If that is
         * the case, mark those sections online here as otherwise they will be
         * left offline.
         */
        if (nr_pages >= PAGES_PER_SECTION)
                online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

        return ret;
}

void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long end_pfn = pfn + nr_pages;

        /*
         * It might be that the vmemmap_pages fully span sections. If that is
         * the case, mark those sections offline here as otherwise they will be
         * left online.
         */
        if (nr_pages >= PAGES_PER_SECTION)
                offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

        /*
         * The pages associated with this vmemmap have been offlined, so
         * we can reset its state here.
         */
        remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
        kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}

/*
 * Must be called with mem_hotplug_lock in write mode.
 */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
                       struct zone *zone, struct memory_group *group)
{
        unsigned long flags;
        int need_zonelists_rebuild = 0;
        const int nid = zone_to_nid(zone);
        int ret;
        struct memory_notify arg;

        /*
         * {on,off}lining is constrained to full memory sections (or more
         * precisely to memory blocks from the user space POV).
         * memmap_on_memory is an exception because it reserves the initial
         * part of the physical memory space for vmemmaps. That space is
         * pageblock aligned.
         */
        if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
                         !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
                return -EINVAL;

        /* associate pfn range with the zone */
        move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

        arg.start_pfn = pfn;
        arg.nr_pages = nr_pages;
        node_states_check_changes_online(nr_pages, zone, &arg);

        ret = memory_notify(MEM_GOING_ONLINE, &arg);
        ret = notifier_to_errno(ret);
        if (ret)
                goto failed_addition;

        /*
         * Fixup the number of isolated pageblocks before marking the sections
         * as online, such that undo_isolate_page_range() works correctly.
         */
        spin_lock_irqsave(&zone->lock, flags);
        zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
        spin_unlock_irqrestore(&zone->lock, flags);

        /*
         * If this zone is not populated, then it is not in zonelist.
         * This means the page allocator ignores this zone.
         * So, zonelist must be updated after online.
         */
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;
                setup_zone_pageset(zone);
        }

        online_pages_range(pfn, nr_pages);
        adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);

        node_states_set_node(nid, &arg);
        if (need_zonelists_rebuild)
                build_all_zonelists(NULL);

        /* Basic onlining is complete, allow allocation of onlined pages. */
        undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);

        /*
         * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
         * the tail of the freelist when undoing isolation). Shuffle the whole
         * zone to make sure the just onlined pages are properly distributed
         * across the whole freelist - to create an initial shuffle.
         */
        shuffle_zone(zone);

        /* reinitialise watermarks and update pcp limits */
        init_per_zone_wmark_min();

        kswapd_run(nid);
        kcompactd_run(nid);

        writeback_set_ratelimit();

        memory_notify(MEM_ONLINE, &arg);
        return 0;

failed_addition:
        pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
                 (unsigned long long) pfn << PAGE_SHIFT,
                 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
        memory_notify(MEM_CANCEL_ONLINE, &arg);
        remove_pfn_range_from_zone(zone, pfn, nr_pages);
        return ret;
}

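/*
 * Sequence summary (illustrative): online_pages() wires the range into the
 * zone with all pageblocks isolated, gives MEM_GOING_ONLINE notifiers a
 * chance to veto, frees the pages to the buddy via online_pages_range(),
 * and only then undoes the isolation - so the allocator never sees a
 * half-initialized range.
 */
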
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_init_pgdat(int nid)
{
        struct pglist_data *pgdat;

        /*
         * NODE_DATA is preallocated (free_area_init) but its internal
         * state is not allocated completely. Add missing pieces.
         * Completely offline nodes stay around and they just need
         * reinitialization.
         */
        pgdat = NODE_DATA(nid);

        /* init node's zones as empty zones, we don't have any present pages.*/
        free_area_init_core_hotplug(pgdat);

        /*
         * The node we allocated has no zone fallback lists. To avoid
         * accessing a not-initialized zonelist, build it here.
         */
        build_all_zonelists(pgdat);

        return pgdat;
}

/*
 * __try_online_node - online a node if offlined
 * @nid: the node ID
 * @set_node_online: Whether we want to online the node
 *
 * Called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, bool set_node_online)
{
        pg_data_t *pgdat;
        int ret = 1;

        if (node_online(nid))
                return 0;

        pgdat = hotadd_init_pgdat(nid);
        if (!pgdat) {
                pr_err("Cannot online node %d due to NULL pgdat\n", nid);
                ret = -ENOMEM;
                goto out;
        }

        if (set_node_online) {
                node_set_online(nid);
                ret = register_one_node(nid);
                BUG_ON(ret);
        }
out:
        return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
        int ret;

        mem_hotplug_begin();
        ret = __try_online_node(nid, true);
        mem_hotplug_done();
        return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
        /* memory range must be block size aligned */
        if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
            !IS_ALIGNED(size, memory_block_size_bytes())) {
                pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
                       memory_block_size_bytes(), start, size);
                return -EINVAL;
        }

        return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
        mem->online_type = mhp_default_online_type;
        return device_online(&mem->dev);
}

85a2b4b0 AK |
1319 | #ifndef arch_supports_memmap_on_memory |
1320 | static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size) | |
1321 | { | |
1322 | /* | |
1323 | * As default, we want the vmemmap to span a complete PMD such that we | |
1324 | * can map the vmemmap using a single PMD if supported by the | |
1325 | * architecture. | |
1326 | */ | |
1327 | return IS_ALIGNED(vmemmap_size, PMD_SIZE); | |
1328 | } | |
1329 | #endif | |
1330 | ||
e3c2bfdd | 1331 | static bool mhp_supports_memmap_on_memory(unsigned long size) |
a08a2ae3 | 1332 | { |
85a2b4b0 | 1333 | unsigned long vmemmap_size = memory_block_memmap_size(); |
2d1f649c | 1334 | unsigned long memmap_pages = memory_block_memmap_on_memory_pages(); |
a08a2ae3 OS |
1335 | |
1336 | /* | |
1337 | * Besides having arch support and the feature enabled at runtime, we | |
1338 | * need a few more assumptions to hold true: | |
1339 | * | |
1340 | * a) We span a single memory block: memory onlining/offlinin;g happens | |
1341 | * in memory block granularity. We don't want the vmemmap of online | |
1342 | * memory blocks to reside on offline memory blocks. In the future, | |
1343 | * we might want to support variable-sized memory blocks to make the | |
1344 | * feature more versatile. | |
1345 | * | |
1346 | * b) The vmemmap pages span complete PMDs: We don't want vmemmap code | |
1347 | * to populate memory from the altmap for unrelated parts (i.e., | |
1348 | * other memory blocks) | |
1349 | * | |
1350 | * c) The vmemmap pages (and thereby the pages that will be exposed to | |
1351 | * the buddy) have to cover full pageblocks: memory onlining/offlining | |
1352 | * code requires applicable ranges to be page-aligned, for example, to | |
1353 | * set the migratetypes properly. | |
1354 | * | |
1355 | * TODO: Although we have a check here to make sure that vmemmap pages | |
1356 | * fully populate a PMD, it is not the right place to check for | |
1357 | * this. A much better solution involves improving vmemmap code | |
1358 | * to fallback to base pages when trying to populate vmemmap using | |
1359 | * altmap as an alternative source of memory, and we do not exactly | |
1360 | * populate a single PMD. | |
1361 | */ | |
2d1f649c AK |
1362 | if (!mhp_memmap_on_memory() || size != memory_block_size_bytes()) |
1363 | return false; | |
1364 | ||
1365 | /* | |
1366 | * Make sure the vmemmap allocation is fully contained | |
1367 | * so that we always allocate vmemmap memory from altmap area. | |
1368 | */ | |
1369 | if (!IS_ALIGNED(vmemmap_size, PAGE_SIZE)) | |
1370 | return false; | |
1371 | ||
1372 | /* | |
1373 | * The start PFN must be pageblock_nr_pages aligned so that
1374 | * migratetypes can be set correctly.
1375 | */ | |
1376 | if (!pageblock_aligned(memmap_pages)) | |
1377 | return false; | |
1378 | ||
1379 | if (memmap_pages == PHYS_PFN(memory_block_size_bytes())) | |
1380 | /* The vmemmap would consume the whole block; no effective memory would be added. */
1381 | return false; | |
1382 | ||
1383 | return arch_supports_memmap_on_memory(vmemmap_size); | |
a08a2ae3 OS |
1384 | } |
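/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * opting in to a self-hosted memmap. The flag is only honored when
 * mhp_supports_memmap_on_memory() confirms all the constraints above.
 */
static int example_add_with_memmap(int nid, u64 start, u64 size)
{
	return add_memory(nid, start, size, MHP_MEMMAP_ON_MEMORY);
}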
1385 | ||
6b8f0798 VV |
1386 | static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size) |
1387 | { | |
1388 | unsigned long memblock_size = memory_block_size_bytes(); | |
1389 | u64 cur_start; | |
1390 | ||
1391 | /* | |
1392 | * For memmap_on_memory, the altmaps were added on a per-memblock | |
1393 | * basis; we have to process each individual memory block. | |
1394 | */ | |
1395 | for (cur_start = start; cur_start < start + size; | |
1396 | cur_start += memblock_size) { | |
1397 | struct vmem_altmap *altmap = NULL; | |
1398 | struct memory_block *mem; | |
1399 | ||
1400 | mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start))); | |
1401 | if (WARN_ON_ONCE(!mem)) | |
1402 | continue; | |
1403 | ||
1404 | altmap = mem->altmap; | |
1405 | mem->altmap = NULL; | |
1406 | ||
1407 | remove_memory_block_devices(cur_start, memblock_size); | |
1408 | ||
1409 | arch_remove_memory(cur_start, memblock_size, altmap); | |
1410 | ||
1411 | /* Verify that all vmemmap pages have actually been freed. */ | |
1412 | WARN(altmap->alloc, "Altmap not fully unmapped"); | |
1413 | kfree(altmap); | |
1414 | } | |
1415 | } | |
1416 | ||
1417 | static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group, | |
1418 | u64 start, u64 size) | |
1419 | { | |
1420 | unsigned long memblock_size = memory_block_size_bytes(); | |
1421 | u64 cur_start; | |
1422 | int ret; | |
1423 | ||
1424 | for (cur_start = start; cur_start < start + size; | |
1425 | cur_start += memblock_size) { | |
1426 | struct mhp_params params = { .pgprot = | |
1427 | pgprot_mhp(PAGE_KERNEL) }; | |
1428 | struct vmem_altmap mhp_altmap = { | |
1429 | .base_pfn = PHYS_PFN(cur_start), | |
1430 | .end_pfn = PHYS_PFN(cur_start + memblock_size - 1), | |
1431 | }; | |
1432 | ||
1433 | mhp_altmap.free = memory_block_memmap_on_memory_pages(); | |
1434 | params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap), | |
1435 | GFP_KERNEL); | |
1436 | if (!params.altmap) { | |
1437 | ret = -ENOMEM; | |
1438 | goto out; | |
1439 | } | |
1440 | ||
1441 | /* call arch's memory hotadd */ | |
1442 | ret = arch_add_memory(nid, cur_start, memblock_size, ¶ms); | |
1443 | if (ret < 0) { | |
1444 | kfree(params.altmap); | |
1445 | goto out; | |
1446 | } | |
1447 | ||
1448 | /* create memory block devices after memory was added */ | |
1449 | ret = create_memory_block_devices(cur_start, memblock_size, | |
1450 | params.altmap, group); | |
1451 | if (ret) { | |
1452 | arch_remove_memory(cur_start, memblock_size, NULL); | |
1453 | kfree(params.altmap); | |
1454 | goto out; | |
1455 | } | |
1456 | } | |
1457 | ||
1458 | return 0; | |
1459 | out: | |
1460 | if (ret && cur_start != start) | |
1461 | remove_memory_blocks_and_altmaps(start, cur_start - start); | |
1462 | return ret; | |
1463 | } | |
1464 | ||
8df1d0e4 DH |
1465 | /* |
1466 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
1467 | * and online/offline operations (triggered e.g. by sysfs). | |
1468 | * | |
1469 | * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG | |
1470 | */ | |
b6117199 | 1471 | int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) |
bc02af93 | 1472 | { |
d15dfd31 | 1473 | struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) }; |
32befe9e | 1474 | enum memblock_flags memblock_flags = MEMBLOCK_NONE; |
028fc57a | 1475 | struct memory_group *group = NULL; |
62cedb9f | 1476 | u64 start, size; |
b9ff0360 | 1477 | bool new_node = false; |
bc02af93 YG |
1478 | int ret; |
1479 | ||
62cedb9f DV |
1480 | start = res->start; |
1481 | size = resource_size(res); | |
1482 | ||
27356f54 TK |
1483 | ret = check_hotplug_memory_range(start, size); |
1484 | if (ret) | |
1485 | return ret; | |
1486 | ||
028fc57a DH |
1487 | if (mhp_flags & MHP_NID_IS_MGID) { |
1488 | group = memory_group_find_by_id(nid); | |
1489 | if (!group) | |
1490 | return -EINVAL; | |
1491 | nid = group->nid; | |
1492 | } | |
1493 | ||
fa6d9ec7 VV |
1494 | if (!node_possible(nid)) { |
1495 | WARN(1, "node %d was absent from the node_possible_map\n", nid); | |
1496 | return -EINVAL; | |
1497 | } | |
1498 | ||
bfc8c901 | 1499 | mem_hotplug_begin(); |
ac13c462 | 1500 | |
53d38316 | 1501 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { |
32befe9e DH |
1502 | if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED) |
1503 | memblock_flags = MEMBLOCK_DRIVER_MANAGED; | |
1504 | ret = memblock_add_node(start, size, nid, memblock_flags); | |
53d38316 DH |
1505 | if (ret) |
1506 | goto error_mem_hotplug_end; | |
1507 | } | |
7f36e3e5 | 1508 | |
c68ab18c | 1509 | ret = __try_online_node(nid, false); |
b9ff0360 OS |
1510 | if (ret < 0) |
1511 | goto error; | |
1512 | new_node = ret; | |
9af3c2de | 1513 | |
a08a2ae3 OS |
1514 | /* |
1515 | * Self-hosted memmap array
1516 | */ | |
6b8f0798 VV |
1517 | if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) && |
1518 | mhp_supports_memmap_on_memory(memory_block_size_bytes())) { | |
1519 | ret = create_altmaps_and_memory_blocks(nid, group, start, size); | |
1520 | if (ret) | |
1521 | goto error; | |
1522 | } else { | |
1523 | ret = arch_add_memory(nid, start, size, ¶ms); | |
1524 | if (ret < 0) | |
1525 | goto error; | |
9af3c2de | 1526 | |
6b8f0798 VV |
1527 | /* create memory block devices after memory was added */ |
1528 | ret = create_memory_block_devices(start, size, NULL, group); | |
1529 | if (ret) { | |
1530 | arch_remove_memory(start, size, params.altmap); | |
1531 | goto error; | |
1532 | } | |
db051a0d DH |
1533 | } |
1534 | ||
a1e565aa | 1535 | if (new_node) { |
d5b6f6a3 | 1536 | /* If the sysfs file of the new node can't be created, CPUs on the
0fc44159 YG |
1537 | * node can't be hot-added. There is no way to roll back now, so
1538 | * reluctantly catch such a failure with BUG_ON().
d5b6f6a3 | 1539 | * We online the node here; we can't roll back from this point.
0fc44159 | 1540 | */
d5b6f6a3 OS |
1541 | node_set_online(nid); |
1542 | ret = __register_one_node(nid); | |
0fc44159 YG |
1543 | BUG_ON(ret); |
1544 | } | |
1545 | ||
cc651559 DH |
1546 | register_memory_blocks_under_node(nid, PFN_DOWN(start), |
1547 | PFN_UP(start + size - 1), | |
1548 | MEMINIT_HOTPLUG); | |
d5b6f6a3 | 1549 | |
d96ae530 | 1550 | /* create new memmap entry */ |
7b7b2721 DH |
1551 | if (!strcmp(res->name, "System RAM")) |
1552 | firmware_map_add_hotplug(start, start + size, "System RAM"); | |
d96ae530 | 1553 | |
381eab4a DH |
1554 | /* device_online() will take the lock when calling online_pages() */ |
1555 | mem_hotplug_done(); | |
1556 | ||
9ca6551e DH |
1557 | /* |
1558 | * In case we're allowed to merge the resource, flag it and trigger | |
1559 | * merging now that adding succeeded. | |
1560 | */ | |
26011267 | 1561 | if (mhp_flags & MHP_MERGE_RESOURCE) |
9ca6551e DH |
1562 | merge_system_ram_resource(res); |
1563 | ||
31bc3858 | 1564 | /* online pages if requested */ |
1adf8b46 | 1565 | if (mhp_default_online_type != MMOP_OFFLINE) |
fbcf73ce | 1566 | walk_memory_blocks(start, size, NULL, online_memory_block); |
31bc3858 | 1567 | |
381eab4a | 1568 | return ret; |
9af3c2de | 1569 | error: |
52219aea DH |
1570 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) |
1571 | memblock_remove(start, size); | |
53d38316 | 1572 | error_mem_hotplug_end: |
bfc8c901 | 1573 | mem_hotplug_done(); |
bc02af93 YG |
1574 | return ret; |
1575 | } | |
62cedb9f | 1576 | |
8df1d0e4 | 1577 | /* requires device_hotplug_lock, see add_memory_resource() */ |
b6117199 | 1578 | int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags) |
62cedb9f DV |
1579 | { |
1580 | struct resource *res; | |
1581 | int ret; | |
1582 | ||
7b7b2721 | 1583 | res = register_memory_resource(start, size, "System RAM"); |
6f754ba4 VK |
1584 | if (IS_ERR(res)) |
1585 | return PTR_ERR(res); | |
62cedb9f | 1586 | |
b6117199 | 1587 | ret = add_memory_resource(nid, res, mhp_flags); |
62cedb9f DV |
1588 | if (ret < 0) |
1589 | release_memory_resource(res); | |
1590 | return ret; | |
1591 | } | |
8df1d0e4 | 1592 | |
b6117199 | 1593 | int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags) |
8df1d0e4 DH |
1594 | { |
1595 | int rc; | |
1596 | ||
1597 | lock_device_hotplug(); | |
b6117199 | 1598 | rc = __add_memory(nid, start, size, mhp_flags); |
8df1d0e4 DH |
1599 | unlock_device_hotplug(); |
1600 | ||
1601 | return rc; | |
1602 | } | |
bc02af93 | 1603 | EXPORT_SYMBOL_GPL(add_memory); |
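/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * hot-adding a discovered region as regular System RAM and asking the
 * core to merge the new resource with adjacent System RAM resources.
 * add_memory() takes device_hotplug_lock internally.
 */
static int example_hot_add(int nid, u64 start, u64 size)
{
	return add_memory(nid, start, size, MHP_MERGE_RESOURCE);
}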
0c0e6195 | 1604 | |
7b7b2721 DH |
1605 | /* |
1606 | * Add special, driver-managed memory to the system as system RAM. Such | |
1607 | * memory is not exposed via the raw firmware-provided memmap as system | |
1608 | * RAM, instead, it is detected and added by a driver - during cold boot, | |
1609 | * after a reboot, and after kexec. | |
1610 | * | |
1611 | * Reasons why this memory should not be used for the initial memmap of a | |
1612 | * kexec kernel or for placing kexec images: | |
1613 | * - The booting kernel is in charge of determining how this memory will be | |
1614 | * used (e.g., use persistent memory as system RAM) | |
1615 | * - Coordination with a hypervisor is required before this memory | |
1616 | * can be used (e.g., inaccessible parts). | |
1617 | * | |
1618 | * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided | |
1619 | * memory map") are created. Also, the created memory resource is flagged | |
7cf603d1 | 1620 | * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case |
7b7b2721 DH |
1621 | * this memory as well (esp., not place kexec images onto it). |
1622 | * | |
1623 | * The resource_name (visible via /proc/iomem) has to have the format | |
1624 | * "System RAM ($DRIVER)". | |
1625 | */ | |
1626 | int add_memory_driver_managed(int nid, u64 start, u64 size, | |
b6117199 | 1627 | const char *resource_name, mhp_t mhp_flags) |
7b7b2721 DH |
1628 | { |
1629 | struct resource *res; | |
1630 | int rc; | |
1631 | ||
1632 | if (!resource_name || | |
1633 | strstr(resource_name, "System RAM (") != resource_name || | |
1634 | resource_name[strlen(resource_name) - 1] != ')') | |
1635 | return -EINVAL; | |
1636 | ||
1637 | lock_device_hotplug(); | |
1638 | ||
1639 | res = register_memory_resource(start, size, resource_name); | |
1640 | if (IS_ERR(res)) { | |
1641 | rc = PTR_ERR(res); | |
1642 | goto out_unlock; | |
1643 | } | |
1644 | ||
b6117199 | 1645 | rc = add_memory_resource(nid, res, mhp_flags); |
7b7b2721 DH |
1646 | if (rc < 0) |
1647 | release_memory_resource(res); | |
1648 | ||
1649 | out_unlock: | |
1650 | unlock_device_hotplug(); | |
1651 | return rc; | |
1652 | } | |
1653 | EXPORT_SYMBOL_GPL(add_memory_driver_managed); | |
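/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * exposing device memory as driver-managed System RAM; the dax/kmem
 * driver does something similar. The resource name must follow the
 * "System RAM ($DRIVER)" convention enforced above.
 */
static int example_expose_devmem(int nid, u64 start, u64 size)
{
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example)", MHP_NONE);
}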
1654 | ||
bca3feaa AK |
1655 | /* |
1656 | * Platforms should define arch_get_mappable_range() that provides | |
1657 | * maximum possible addressable physical memory range for which the | |
1658 | * linear mapping could be created. The address range returned by
1659 | * the platform must adhere to the following semantics.
1660 | * | |
1661 | * - range.start <= range.end | |
1662 | * - Range includes both end points [range.start..range.end] | |
1663 | * | |
1664 | * There is also a fallback definition provided here, allowing the | |
1665 | * entire possible physical address range in case any platform does | |
1666 | * not define arch_get_mappable_range(). | |
1667 | */ | |
1668 | struct range __weak arch_get_mappable_range(void) | |
1669 | { | |
1670 | struct range mhp_range = { | |
1671 | .start = 0UL, | |
1672 | .end = -1ULL, | |
1673 | }; | |
1674 | return mhp_range; | |
1675 | } | |
1676 | ||
1677 | struct range mhp_get_pluggable_range(bool need_mapping) | |
1678 | { | |
1679 | const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1; | |
1680 | struct range mhp_range; | |
1681 | ||
1682 | if (need_mapping) { | |
1683 | mhp_range = arch_get_mappable_range(); | |
1684 | if (mhp_range.start > max_phys) { | |
1685 | mhp_range.start = 0; | |
1686 | mhp_range.end = 0; | |
1687 | } | |
1688 | mhp_range.end = min_t(u64, mhp_range.end, max_phys); | |
1689 | } else { | |
1690 | mhp_range.start = 0; | |
1691 | mhp_range.end = max_phys; | |
1692 | } | |
1693 | return mhp_range; | |
1694 | } | |
1695 | EXPORT_SYMBOL_GPL(mhp_get_pluggable_range); | |
1696 | ||
1697 | bool mhp_range_allowed(u64 start, u64 size, bool need_mapping) | |
1698 | { | |
1699 | struct range mhp_range = mhp_get_pluggable_range(need_mapping); | |
1700 | u64 end = start + size; | |
1701 | ||
1702 | if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end) | |
1703 | return true; | |
1704 | ||
1705 | pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n", | |
1706 | start, end, mhp_range.start, mhp_range.end); | |
1707 | return false; | |
1708 | } | |
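/*
 * Illustrative sketch (not part of this file): validating a candidate
 * range before attempting a hot-add. need_mapping selects whether the
 * range must also fit the architecture's linear mapping limits.
 */
static bool example_range_ok(u64 start, u64 size)
{
	return mhp_range_allowed(start, size, /* need_mapping */ true);
}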
1709 | ||
0c0e6195 | 1710 | #ifdef CONFIG_MEMORY_HOTREMOVE |
0c0e6195 | 1711 | /* |
0efadf48 | 1712 | * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, |
aa218795 DH |
1713 | * non-lru movable pages and hugepages). Will skip over most unmovable |
1714 | * pages (esp., pages that can be skipped when offlining), but bail out on | |
1715 | * definitely unmovable pages. | |
1716 | * | |
1717 | * Returns: | |
1718 | * 0 in case a movable page is found and movable_pfn was updated. | |
1719 | * -ENOENT in case no movable page was found. | |
1720 | * -EBUSY in case a definitely unmovable page was found. | |
0c0e6195 | 1721 | */ |
aa218795 DH |
1722 | static int scan_movable_pages(unsigned long start, unsigned long end, |
1723 | unsigned long *movable_pfn) | |
0c0e6195 KH |
1724 | { |
1725 | unsigned long pfn; | |
eeb0efd0 | 1726 | |
0c0e6195 | 1727 | for (pfn = start; pfn < end; pfn++) { |
eeb0efd0 OS |
1728 | struct page *page, *head; |
1729 | unsigned long skip; | |
1730 | ||
1731 | if (!pfn_valid(pfn)) | |
1732 | continue; | |
1733 | page = pfn_to_page(pfn); | |
1734 | if (PageLRU(page)) | |
aa218795 | 1735 | goto found; |
eeb0efd0 | 1736 | if (__PageMovable(page)) |
aa218795 DH |
1737 | goto found; |
1738 | ||
1739 | /* | |
1740 | * PageOffline() pages that are not marked __PageMovable() and | |
1741 | * have a reference count > 0 (after MEM_GOING_OFFLINE) are | |
1742 | * definitely unmovable. If their reference count would be 0, | |
1743 | * they could at least be skipped when offlining memory. | |
1744 | */ | |
1745 | if (PageOffline(page) && page_count(page)) | |
1746 | return -EBUSY; | |
eeb0efd0 OS |
1747 | |
1748 | if (!PageHuge(page)) | |
1749 | continue; | |
1750 | head = compound_head(page); | |
8f251a3d MK |
1751 | /* |
1752 | * This test is racy as we hold no reference or lock. The | |
1753 | * hugetlb page could have been freed and head may no longer be
1754 | * a hugetlb page by the time of the following check. In such unlikely
1755 | * cases false positives and negatives are possible. Calling | |
1756 | * code must deal with these scenarios. | |
1757 | */ | |
1758 | if (HPageMigratable(head)) | |
aa218795 | 1759 | goto found; |
1640a0ef | 1760 | skip = compound_nr(head) - (pfn - page_to_pfn(head)); |
eeb0efd0 | 1761 | pfn += skip - 1; |
0c0e6195 | 1762 | } |
aa218795 DH |
1763 | return -ENOENT; |
1764 | found: | |
1765 | *movable_pfn = pfn; | |
0c0e6195 KH |
1766 | return 0; |
1767 | } | |
1768 | ||
32cf666e | 1769 | static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) |
0c0e6195 KH |
1770 | { |
1771 | unsigned long pfn; | |
6c357848 | 1772 | struct page *page, *head; |
0c0e6195 | 1773 | LIST_HEAD(source); |
786dee86 LM |
1774 | static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL, |
1775 | DEFAULT_RATELIMIT_BURST); | |
0c0e6195 | 1776 | |
a85009c3 | 1777 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
869f7ee6 | 1778 | struct folio *folio; |
f7f9c00d | 1779 | bool isolated; |
869f7ee6 | 1780 | |
0c0e6195 KH |
1781 | if (!pfn_valid(pfn)) |
1782 | continue; | |
1783 | page = pfn_to_page(pfn); | |
869f7ee6 MWO |
1784 | folio = page_folio(page); |
1785 | head = &folio->page; | |
c8721bbb NH |
1786 | |
1787 | if (PageHuge(page)) { | |
d8c6546b | 1788 | pfn = page_to_pfn(head) + compound_nr(head) - 1; |
6aa3a920 | 1789 | isolate_hugetlb(folio, &source); |
c8721bbb | 1790 | continue; |
94723aaf | 1791 | } else if (PageTransHuge(page)) |
6c357848 | 1792 | pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; |
c8721bbb | 1793 | |
b15c8726 MH |
1794 | /* |
1795 | * HWPoison pages have elevated reference counts so the migration would | |
1796 | * fail on them. It also doesn't make any sense to migrate them in the | |
1797 | * first place. Still try to unmap such a page in case it is still mapped | |
1798 | * (e.g. the current hwpoison implementation doesn't unmap KSM pages, so
1799 | * keep the unmap as the catch-all safety net).
1800 | */ | |
1801 | if (PageHWPoison(page)) { | |
869f7ee6 MWO |
1802 | if (WARN_ON(folio_test_lru(folio))) |
1803 | folio_isolate_lru(folio); | |
1804 | if (folio_mapped(folio)) | |
1805 | try_to_unmap(folio, TTU_IGNORE_MLOCK); | |
b15c8726 MH |
1806 | continue; |
1807 | } | |
1808 | ||
700c2a46 | 1809 | if (!get_page_unless_zero(page)) |
0c0e6195 KH |
1810 | continue; |
1811 | /* | |
0efadf48 YX |
1812 | * We can skip free pages. And we can deal with LRU pages and
1813 | * non-LRU movable pages.
0c0e6195 | 1814 | */ |
cd775580 | 1815 | if (PageLRU(page)) |
f7f9c00d | 1816 | isolated = isolate_lru_page(page); |
cd775580 BW |
1817 | else |
1818 | isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | |
1819 | if (isolated) { | |
62695a84 | 1820 | list_add_tail(&page->lru, &source); |
0efadf48 YX |
1821 | if (!__PageMovable(page)) |
1822 | inc_node_page_state(page, NR_ISOLATED_ANON + | |
9de4f22a | 1823 | page_is_file_lru(page)); |
6d9c285a | 1824 | |
0c0e6195 | 1825 | } else { |
786dee86 LM |
1826 | if (__ratelimit(&migrate_rs)) { |
1827 | pr_warn("failed to isolate pfn %lx\n", pfn); | |
1828 | dump_page(page, "isolation failed"); | |
1829 | } | |
0c0e6195 | 1830 | } |
1723058e | 1831 | put_page(page); |
0c0e6195 | 1832 | } |
f3ab2636 | 1833 | if (!list_empty(&source)) { |
203e6e5c JK |
1834 | nodemask_t nmask = node_states[N_MEMORY]; |
1835 | struct migration_target_control mtc = { | |
1836 | .nmask = &nmask, | |
1837 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, | |
1838 | }; | |
32cf666e | 1839 | int ret; |
203e6e5c JK |
1840 | |
1841 | /* | |
1842 | * We have checked that the migration range is within a single zone,
1843 | * so we can use the nid of the first page for all the others.
1844 | */ | |
1845 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); | |
1846 | ||
1847 | /* | |
1848 | * try to allocate from a different node but reuse this node | |
1849 | * if there are no other online nodes to be used (e.g. we are | |
1850 | * offlining a part of the only existing node) | |
1851 | */ | |
1852 | node_clear(mtc.nid, nmask); | |
1853 | if (nodes_empty(nmask)) | |
1854 | node_set(mtc.nid, nmask); | |
1855 | ret = migrate_pages(&source, alloc_migration_target, NULL, | |
5ac95884 | 1856 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL); |
2932c8b0 MH |
1857 | if (ret) { |
1858 | list_for_each_entry(page, &source, lru) { | |
786dee86 LM |
1859 | if (__ratelimit(&migrate_rs)) { |
1860 | pr_warn("migrating pfn %lx failed ret:%d\n", | |
1861 | page_to_pfn(page), ret); | |
1862 | dump_page(page, "migration failure"); | |
1863 | } | |
2932c8b0 | 1864 | } |
c8721bbb | 1865 | putback_movable_pages(&source); |
2932c8b0 | 1866 | } |
0c0e6195 | 1867 | } |
0c0e6195 KH |
1868 | } |
1869 | ||
c5320926 TC |
1870 | static int __init cmdline_parse_movable_node(char *p) |
1871 | { | |
55ac590c | 1872 | movable_node_enabled = true; |
c5320926 TC |
1873 | return 0; |
1874 | } | |
1875 | early_param("movable_node", cmdline_parse_movable_node); | |
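/*
 * Usage example: booting with "movable_node" on the kernel command
 * line sets movable_node_enabled, allowing memory on hotpluggable
 * nodes to be kept in ZONE_MOVABLE so that it remains removable.
 */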
1876 | ||
d9713679 LJ |
1877 | /* check which state of node_states will be changed when offline memory */ |
1878 | static void node_states_check_changes_offline(unsigned long nr_pages, | |
1879 | struct zone *zone, struct memory_notify *arg) | |
1880 | { | |
1881 | struct pglist_data *pgdat = zone->zone_pgdat; | |
1882 | unsigned long present_pages = 0; | |
86b27bea | 1883 | enum zone_type zt; |
d9713679 | 1884 | |
98fa15f3 AK |
1885 | arg->status_change_nid = NUMA_NO_NODE; |
1886 | arg->status_change_nid_normal = NUMA_NO_NODE; | |
d9713679 LJ |
1887 | |
1888 | /* | |
86b27bea OS |
1889 | * Check whether node_states[N_NORMAL_MEMORY] will be changed. |
1890 | * If the memory to be offlined is within the range
1891 | * [0..ZONE_NORMAL], and it is the last present memory there, | |
1892 | * the zones in that range will become empty after the offlining, | |
1893 | * thus we can determine that we need to clear the node from | |
1894 | * node_states[N_NORMAL_MEMORY]. | |
d9713679 | 1895 | */ |
86b27bea | 1896 | for (zt = 0; zt <= ZONE_NORMAL; zt++) |
d9713679 | 1897 | present_pages += pgdat->node_zones[zt].present_pages; |
86b27bea | 1898 | if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) |
d9713679 | 1899 | arg->status_change_nid_normal = zone_to_nid(zone); |
d9713679 LJ |
1900 | |
1901 | /* | |
6b740c6c DH |
1902 | * We have accounted the pages from [0..ZONE_NORMAL); ZONE_HIGHMEM |
1903 | * does not apply as we don't support 32bit. | |
86b27bea OS |
1904 | * Here we count the possible pages from ZONE_MOVABLE. |
1905 | * If, after having accounted all the pages, we see that nr_pages
1906 | * to be offlined is greater than or equal to the accounted pages,
1907 | * we know that the node will become empty, and so, we can clear | |
1908 | * it for N_MEMORY as well. | |
d9713679 | 1909 | */ |
86b27bea | 1910 | present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; |
d9713679 | 1911 | |
d9713679 LJ |
1912 | if (nr_pages >= present_pages) |
1913 | arg->status_change_nid = zone_to_nid(zone); | |
d9713679 LJ |
1914 | } |
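/*
 * Worked example (illustrative): offlining 512 MiB from a node whose
 * zones [0..ZONE_NORMAL] hold exactly those 512 MiB of present pages
 * sets status_change_nid_normal; if ZONE_MOVABLE adds nothing on top,
 * nr_pages >= present_pages still holds and status_change_nid is set
 * as well, so the node will be cleared from N_MEMORY after offlining.
 */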
1915 | ||
1916 | static void node_states_clear_node(int node, struct memory_notify *arg) | |
1917 | { | |
1918 | if (arg->status_change_nid_normal >= 0) | |
1919 | node_clear_state(node, N_NORMAL_MEMORY); | |
1920 | ||
cf01f6f5 | 1921 | if (arg->status_change_nid >= 0) |
6715ddf9 | 1922 | node_clear_state(node, N_MEMORY); |
d9713679 LJ |
1923 | } |
1924 | ||
c5e79ef5 DH |
1925 | static int count_system_ram_pages_cb(unsigned long start_pfn, |
1926 | unsigned long nr_pages, void *data) | |
1927 | { | |
1928 | unsigned long *nr_system_ram_pages = data; | |
1929 | ||
1930 | *nr_system_ram_pages += nr_pages; | |
1931 | return 0; | |
1932 | } | |
1933 | ||
001002e7 SK |
1934 | /* |
1935 | * Must be called with mem_hotplug_lock in write mode. | |
1936 | */ | |
836809ec | 1937 | int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, |
395f6081 | 1938 | struct zone *zone, struct memory_group *group) |
0c0e6195 | 1939 | { |
73a11c96 | 1940 | const unsigned long end_pfn = start_pfn + nr_pages; |
0a1a9a00 | 1941 | unsigned long pfn, system_ram_pages = 0; |
395f6081 | 1942 | const int node = zone_to_nid(zone); |
d702909f | 1943 | unsigned long flags; |
7b78d335 | 1944 | struct memory_notify arg; |
79605093 | 1945 | char *reason; |
395f6081 | 1946 | int ret; |
0c0e6195 | 1947 | |
dd8e2f23 OS |
1948 | /* |
1949 | * {on,off}lining is constrained to full memory sections (or more | |
041711ce | 1950 | * precisely to memory blocks from the user space POV). |
dd8e2f23 OS |
1951 | * memmap_on_memory is an exception because it reserves the initial
1952 | * part of the physical memory space for vmemmaps. That space is pageblock
1953 | * aligned. | |
1954 | */ | |
ee0913c4 | 1955 | if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) || |
dd8e2f23 | 1956 | !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) |
4986fac1 DH |
1957 | return -EINVAL; |
1958 | ||
c5e79ef5 DH |
1959 | /* |
1960 | * Don't allow to offline memory blocks that contain holes. | |
1961 | * Consequently, memory blocks with holes can never get onlined | |
1962 | * via the hotplug path - online_pages() - as hotplugged memory has | |
1963 | * no holes. This way, we e.g., don't have to worry about marking | |
1964 | * memory holes PG_reserved, don't need pfn_valid() checks, and can | |
1965 | * avoid using walk_system_ram_range() later. | |
1966 | */ | |
73a11c96 | 1967 | walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, |
c5e79ef5 | 1968 | count_system_ram_pages_cb); |
73a11c96 | 1969 | if (system_ram_pages != nr_pages) { |
c5e79ef5 DH |
1970 | ret = -EINVAL; |
1971 | reason = "memory holes"; | |
1972 | goto failed_removal; | |
1973 | } | |
1974 | ||
395f6081 DH |
1975 | /* |
1976 | * We only support offlining of memory blocks managed by a single zone, | |
1977 | * checked by calling code. This is just a sanity check that we might | |
1978 | * want to remove in the future. | |
1979 | */ | |
1980 | if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone || | |
1981 | page_zone(pfn_to_page(end_pfn - 1)) != zone)) { | |
79605093 MH |
1982 | ret = -EINVAL; |
1983 | reason = "multizone range"; | |
1984 | goto failed_removal; | |
381eab4a | 1985 | } |
7b78d335 | 1986 | |
ec6e8c7e VB |
1987 | /* |
1988 | * Disable pcplists so that page isolation cannot race with freeing | |
1989 | * in a way that pages from isolated pageblock are left on pcplists. | |
1990 | */ | |
1991 | zone_pcp_disable(zone); | |
d479960e | 1992 | lru_cache_disable(); |
ec6e8c7e | 1993 | |
0c0e6195 | 1994 | /* set above range as isolated */ |
b023f468 | 1995 | ret = start_isolate_page_range(start_pfn, end_pfn, |
d381c547 | 1996 | MIGRATE_MOVABLE, |
b2c9e2fb ZY |
1997 | MEMORY_OFFLINE | REPORT_FAILURE, |
1998 | GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL); | |
3fa0c7c7 | 1999 | if (ret) { |
79605093 | 2000 | reason = "failure to isolate range"; |
ec6e8c7e | 2001 | goto failed_removal_pcplists_disabled; |
381eab4a | 2002 | } |
7b78d335 YG |
2003 | |
2004 | arg.start_pfn = start_pfn; | |
2005 | arg.nr_pages = nr_pages; | |
d9713679 | 2006 | node_states_check_changes_offline(nr_pages, zone, &arg); |
7b78d335 YG |
2007 | |
2008 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); | |
2009 | ret = notifier_to_errno(ret); | |
79605093 MH |
2010 | if (ret) { |
2011 | reason = "notifier failure"; | |
2012 | goto failed_removal_isolated; | |
2013 | } | |
7b78d335 | 2014 | |
bb8965bd | 2015 | do { |
aa218795 DH |
2016 | pfn = start_pfn; |
2017 | do { | |
de7cb03d DH |
2018 | /* |
2019 | * Historically we always checked for any signal and | |
2020 | * can't limit it to fatal signals without eventually | |
2021 | * breaking user space. | |
2022 | */ | |
bb8965bd MH |
2023 | if (signal_pending(current)) { |
2024 | ret = -EINTR; | |
2025 | reason = "signal backoff"; | |
2026 | goto failed_removal_isolated; | |
2027 | } | |
72b39cfc | 2028 | |
bb8965bd | 2029 | cond_resched(); |
bb8965bd | 2030 | |
aa218795 DH |
2031 | ret = scan_movable_pages(pfn, end_pfn, &pfn); |
2032 | if (!ret) { | |
bb8965bd MH |
2033 | /* |
2034 | * TODO: fatal migration failures should bail | |
2035 | * out | |
2036 | */ | |
2037 | do_migrate_range(pfn, end_pfn); | |
2038 | } | |
aa218795 DH |
2039 | } while (!ret); |
2040 | ||
2041 | if (ret != -ENOENT) { | |
2042 | reason = "unmovable page"; | |
2043 | goto failed_removal_isolated; | |
bb8965bd | 2044 | } |
0c0e6195 | 2045 | |
bb8965bd MH |
2046 | /* |
2047 | * Dissolve free hugepages in the memory block before actually
2048 | * offlining, in order to keep hugetlbfs's object counting
2049 | * consistent.
2050 | */ | |
2051 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | |
2052 | if (ret) { | |
2053 | reason = "failure to dissolve huge pages"; | |
2054 | goto failed_removal_isolated; | |
2055 | } | |
0a1a9a00 | 2056 | |
0a1a9a00 | 2057 | ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); |
ec6e8c7e | 2058 | |
5557c766 | 2059 | } while (ret); |
72b39cfc | 2060 | |
0a1a9a00 DH |
2061 | /* Mark all sections offline and remove free pages from the buddy. */ |
2062 | __offline_isolated_pages(start_pfn, end_pfn); | |
7c33023a | 2063 | pr_debug("Offlined Pages %ld\n", nr_pages); |
0a1a9a00 | 2064 | |
9b7ea46a | 2065 | /* |
b30c5927 DH |
2066 | * The memory sections are marked offline, and the pageblock flags are
2067 | * effectively stale; nobody should be touching them. Fix up the number
2068 | * of isolated pageblocks; memory onlining will properly revert this.
9b7ea46a QC |
2069 | */ |
2070 | spin_lock_irqsave(&zone->lock, flags); | |
ea15153c | 2071 | zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; |
9b7ea46a QC |
2072 | spin_unlock_irqrestore(&zone->lock, flags); |
2073 | ||
d479960e | 2074 | lru_cache_enable(); |
ec6e8c7e VB |
2075 | zone_pcp_enable(zone); |
2076 | ||
0c0e6195 | 2077 | /* removal success */ |
0a1a9a00 | 2078 | adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); |
836809ec | 2079 | adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages); |
7b78d335 | 2080 | |
b92ca18e | 2081 | /* reinitialise watermarks and update pcp limits */ |
1b79acc9 KM |
2082 | init_per_zone_wmark_min(); |
2083 | ||
b7812c86 QZ |
2084 | /* |
2085 | * Make sure to mark the node as memory-less before rebuilding the zone | |
2086 | * list. Otherwise this node would still appear in the fallback lists. | |
2087 | */ | |
2088 | node_states_clear_node(node, &arg); | |
1e8537ba | 2089 | if (!populated_zone(zone)) { |
340175b7 | 2090 | zone_pcp_reset(zone); |
72675e13 | 2091 | build_all_zonelists(NULL); |
b92ca18e | 2092 | } |
340175b7 | 2093 | |
698b1b30 | 2094 | if (arg.status_change_nid >= 0) { |
698b1b30 | 2095 | kcompactd_stop(node); |
b4a0215e | 2096 | kswapd_stop(node); |
698b1b30 | 2097 | } |
bce7394a | 2098 | |
0c0e6195 | 2099 | writeback_set_ratelimit(); |
7b78d335 YG |
2100 | |
2101 | memory_notify(MEM_OFFLINE, &arg); | |
feee6b29 | 2102 | remove_pfn_range_from_zone(zone, start_pfn, nr_pages); |
0c0e6195 KH |
2103 | return 0; |
2104 | ||
79605093 | 2105 | failed_removal_isolated: |
36ba30bc | 2106 | /* pushback to free area */ |
79605093 | 2107 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); |
c4efe484 | 2108 | memory_notify(MEM_CANCEL_OFFLINE, &arg); |
ec6e8c7e | 2109 | failed_removal_pcplists_disabled: |
946746d1 | 2110 | lru_cache_enable(); |
ec6e8c7e | 2111 | zone_pcp_enable(zone); |
0c0e6195 | 2112 | failed_removal: |
79605093 | 2113 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
e33e33b4 | 2114 | (unsigned long long) start_pfn << PAGE_SHIFT, |
79605093 MH |
2115 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
2116 | reason); | |
0c0e6195 KH |
2117 | return ret; |
2118 | } | |
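/*
 * Usage example: from user space, offline_pages() is typically reached
 * by offlining a memory block device via sysfs:
 *
 *   echo offline > /sys/devices/system/memory/memory32/state
 */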
71088785 | 2119 | |
d6de9d53 | 2120 | static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) |
bbc76be6 | 2121 | { |
e1c158e4 | 2122 | int *nid = arg; |
bbc76be6 | 2123 | |
e1c158e4 | 2124 | *nid = mem->nid; |
639118d1 | 2125 | if (unlikely(mem->state != MEM_OFFLINE)) { |
349daa0f RD |
2126 | phys_addr_t beginpa, endpa; |
2127 | ||
2128 | beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); | |
b6c88d3b | 2129 | endpa = beginpa + memory_block_size_bytes() - 1; |
756a025f | 2130 | pr_warn("removing memory failed, because memory [%pa-%pa] is online\n",
349daa0f | 2131 | &beginpa, &endpa); |
bbc76be6 | 2132 | |
eca499ab PT |
2133 | return -EBUSY; |
2134 | } | |
2135 | return 0; | |
bbc76be6 WC |
2136 | } |
2137 | ||
6b8f0798 | 2138 | static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg) |
a08a2ae3 | 2139 | { |
6b8f0798 VV |
2140 | u64 *num_altmaps = (u64 *)arg; |
2141 | ||
2142 | if (mem->altmap) | |
2143 | *num_altmaps += 1; | |
2144 | ||
1a8c64e1 | 2145 | return 0; |
a08a2ae3 OS |
2146 | } |
2147 | ||
b27340a5 | 2148 | static int check_cpu_on_node(int nid) |
60a5a19e | 2149 | { |
60a5a19e TC |
2150 | int cpu; |
2151 | ||
2152 | for_each_present_cpu(cpu) { | |
b27340a5 | 2153 | if (cpu_to_node(cpu) == nid) |
60a5a19e TC |
2154 | /* |
2155 | * A CPU on this node hasn't been removed, so we can't
2156 | * offline this node.
2157 | */ | |
2158 | return -EBUSY; | |
2159 | } | |
2160 | ||
2161 | return 0; | |
2162 | } | |
2163 | ||
2c91f8fc DH |
2164 | static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg) |
2165 | { | |
2166 | int nid = *(int *)arg; | |
2167 | ||
2168 | /* | |
2169 | * If a memory block belongs to multiple nodes, the stored nid is not | |
2170 | * reliable. However, such blocks are always online (e.g., cannot get | |
2171 | * offlined) and, therefore, are still spanned by the node. | |
2172 | */ | |
2173 | return mem->nid == nid ? -EEXIST : 0; | |
2174 | } | |
2175 | ||
0f1cfe9d TK |
2176 | /** |
2177 | * try_offline_node | |
e8b098fc | 2178 | * @nid: the node ID |
0f1cfe9d TK |
2179 | * |
2180 | * Offline a node if all memory sections and cpus of the node are removed. | |
2181 | * | |
2182 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
2183 | * and online/offline operations before this call. | |
2184 | */ | |
90b30cdc | 2185 | void try_offline_node(int nid) |
60a5a19e | 2186 | { |
2c91f8fc | 2187 | int rc; |
60a5a19e | 2188 | |
2c91f8fc DH |
2189 | /* |
2190 | * If the node still spans pages (especially ZONE_DEVICE), don't | |
2191 | * offline it. A node spans memory after move_pfn_range_to_zone(), | |
2192 | * e.g., after the memory block was onlined. | |
2193 | */ | |
b27340a5 | 2194 | if (node_spanned_pages(nid)) |
2c91f8fc | 2195 | return; |
60a5a19e | 2196 | |
2c91f8fc DH |
2197 | /* |
2198 | * Especially offline memory blocks might not be spanned by the | |
2199 | * node. They will get spanned by the node once they get onlined. | |
2200 | * However, they link to the node in sysfs and can get onlined later. | |
2201 | */ | |
2202 | rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb); | |
2203 | if (rc) | |
60a5a19e | 2204 | return; |
60a5a19e | 2205 | |
b27340a5 | 2206 | if (check_cpu_on_node(nid)) |
60a5a19e TC |
2207 | return; |
2208 | ||
2209 | /* | |
2210 | * All memory and CPUs of this node have been removed; we can
2211 | * offline this node now.
2212 | */ | |
2213 | node_set_offline(nid); | |
2214 | unregister_one_node(nid); | |
2215 | } | |
90b30cdc | 2216 | EXPORT_SYMBOL(try_offline_node); |
60a5a19e | 2217 | |
6b8f0798 VV |
2218 | static int memory_blocks_have_altmaps(u64 start, u64 size) |
2219 | { | |
2220 | u64 num_memblocks = size / memory_block_size_bytes(); | |
2221 | u64 num_altmaps = 0; | |
2222 | ||
2223 | if (!mhp_memmap_on_memory()) | |
2224 | return 0; | |
2225 | ||
2226 | walk_memory_blocks(start, size, &num_altmaps, | |
2227 | count_memory_range_altmaps_cb); | |
2228 | ||
2229 | if (num_altmaps == 0) | |
2230 | return 0; | |
2231 | ||
2232 | if (WARN_ON_ONCE(num_memblocks != num_altmaps)) | |
2233 | return -EINVAL; | |
2234 | ||
2235 | return 1; | |
2236 | } | |
2237 | ||
e1c158e4 | 2238 | static int __ref try_remove_memory(u64 start, u64 size) |
bbc76be6 | 2239 | { |
6b8f0798 | 2240 | int rc, nid = NUMA_NO_NODE; |
993c1aad | 2241 | |
27356f54 TK |
2242 | BUG_ON(check_hotplug_memory_range(start, size)); |
2243 | ||
6677e3ea | 2244 | /* |
242831eb | 2245 | * All memory blocks must be offlined before removing memory. Check |
eca499ab | 2246 | * whether all memory blocks in question are offline and return error |
242831eb | 2247 | * if this is not the case. |
e1c158e4 DH |
2248 | * |
2249 | * While at it, determine the nid. Note that if we'd have mixed nodes, | |
2250 | * we'd only try to offline the last determined one -- which is good | |
2251 | * enough for the cases we care about. | |
6677e3ea | 2252 | */ |
e1c158e4 | 2253 | rc = walk_memory_blocks(start, size, &nid, check_memblock_offlined_cb); |
eca499ab | 2254 | if (rc) |
b4223a51 | 2255 | return rc; |
6677e3ea | 2256 | |
46c66c4b YI |
2257 | /* remove memmap entry */ |
2258 | firmware_map_remove(start, start + size, "System RAM"); | |
4c4b7f9b | 2259 | |
f1037ec0 DW |
2260 | mem_hotplug_begin(); |
2261 | ||
6b8f0798 VV |
2262 | rc = memory_blocks_have_altmaps(start, size); |
2263 | if (rc < 0) { | |
2264 | mem_hotplug_done(); | |
2265 | return rc; | |
2266 | } else if (!rc) { | |
2267 | /* | |
2268 | * Memory block device removal under the device_hotplug_lock is | |
2269 | * a barrier against racing online attempts. | |
2270 | * No altmaps present; do the removal directly.
2271 | */ | |
2272 | remove_memory_block_devices(start, size); | |
2273 | arch_remove_memory(start, size, NULL); | |
2274 | } else { | |
2275 | /* all memblocks in the range have altmaps */ | |
2276 | remove_memory_blocks_and_altmaps(start, size); | |
1a8c64e1 AK |
2277 | } |
2278 | ||
52219aea | 2279 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { |
3ecc6834 | 2280 | memblock_phys_free(start, size); |
52219aea DH |
2281 | memblock_remove(start, size); |
2282 | } | |
2283 | ||
cb8e3c8b | 2284 | release_mem_region_adjustable(start, size); |
24d335ca | 2285 | |
e1c158e4 DH |
2286 | if (nid != NUMA_NO_NODE) |
2287 | try_offline_node(nid); | |
60a5a19e | 2288 | |
bfc8c901 | 2289 | mem_hotplug_done(); |
b4223a51 | 2290 | return 0; |
71088785 | 2291 | } |
d15e5926 | 2292 | |
eca499ab | 2293 | /** |
5640c9ca | 2294 | * __remove_memory - Remove memory if every memory block is offline |
eca499ab PT |
2295 | * @start: physical address of the region to remove |
2296 | * @size: size of the region to remove | |
2297 | * | |
2298 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
2299 | * and online/offline operations before this call, as required by | |
2300 | * try_offline_node(). | |
2301 | */ | |
e1c158e4 | 2302 | void __remove_memory(u64 start, u64 size) |
eca499ab PT |
2303 | { |
2304 | ||
2305 | /* | |
29a90db9 | 2306 | * trigger BUG() if some memory is not offlined prior to calling this |
eca499ab PT |
2307 | * function |
2308 | */ | |
e1c158e4 | 2309 | if (try_remove_memory(start, size)) |
eca499ab PT |
2310 | BUG(); |
2311 | } | |
2312 | ||
2313 | /* | |
2314 | * Remove memory if every memory block is offline, otherwise return -EBUSY if
2315 | * some memory is not offline.
2316 | */ | |
e1c158e4 | 2317 | int remove_memory(u64 start, u64 size) |
d15e5926 | 2318 | { |
eca499ab PT |
2319 | int rc; |
2320 | ||
d15e5926 | 2321 | lock_device_hotplug(); |
e1c158e4 | 2322 | rc = try_remove_memory(start, size); |
d15e5926 | 2323 | unlock_device_hotplug(); |
eca499ab PT |
2324 | |
2325 | return rc; | |
d15e5926 | 2326 | } |
71088785 | 2327 | EXPORT_SYMBOL_GPL(remove_memory); |
08b3acd7 | 2328 | |
8dc4bb58 DH |
2329 | static int try_offline_memory_block(struct memory_block *mem, void *arg) |
2330 | { | |
2331 | uint8_t online_type = MMOP_ONLINE_KERNEL; | |
2332 | uint8_t **online_types = arg; | |
2333 | struct page *page; | |
2334 | int rc; | |
2335 | ||
2336 | /* | |
2337 | * Sense the online_type via the zone of the memory block. Offlining | |
2338 | * with multiple zones within one memory block will be rejected | |
2339 | * by offlining code ... so we don't care about that. | |
2340 | */ | |
2341 | page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr)); | |
2342 | if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE) | |
2343 | online_type = MMOP_ONLINE_MOVABLE; | |
2344 | ||
2345 | rc = device_offline(&mem->dev); | |
2346 | /* | |
2347 | * Default is MMOP_OFFLINE - change it only if offlining succeeded, | |
2348 | * so try_reonline_memory_block() can do the right thing. | |
2349 | */ | |
2350 | if (!rc) | |
2351 | **online_types = online_type; | |
2352 | ||
2353 | (*online_types)++; | |
2354 | /* Ignore if already offline. */ | |
2355 | return rc < 0 ? rc : 0; | |
2356 | } | |
2357 | ||
2358 | static int try_reonline_memory_block(struct memory_block *mem, void *arg) | |
2359 | { | |
2360 | uint8_t **online_types = arg; | |
2361 | int rc; | |
2362 | ||
2363 | if (**online_types != MMOP_OFFLINE) { | |
2364 | mem->online_type = **online_types; | |
2365 | rc = device_online(&mem->dev); | |
2366 | if (rc < 0) | |
2367 | pr_warn("%s: Failed to re-online memory: %d\n",
2368 | __func__, rc); | |
2369 | } | |
2370 | ||
2371 | /* Continue processing all remaining memory blocks. */ | |
2372 | (*online_types)++; | |
2373 | return 0; | |
2374 | } | |
2375 | ||
08b3acd7 | 2376 | /* |
8dc4bb58 DH |
2377 | * Try to offline and remove memory. Might take a long time to finish in case |
2378 | * memory is still in use. Primarily useful for memory devices that logically | |
2379 | * unplugged all memory (so it's no longer in use) and want to offline + remove | |
2380 | * that memory. | |
08b3acd7 | 2381 | */ |
e1c158e4 | 2382 | int offline_and_remove_memory(u64 start, u64 size) |
08b3acd7 | 2383 | { |
8dc4bb58 DH |
2384 | const unsigned long mb_count = size / memory_block_size_bytes(); |
2385 | uint8_t *online_types, *tmp; | |
2386 | int rc; | |
08b3acd7 DH |
2387 | |
2388 | if (!IS_ALIGNED(start, memory_block_size_bytes()) || | |
8dc4bb58 DH |
2389 | !IS_ALIGNED(size, memory_block_size_bytes()) || !size) |
2390 | return -EINVAL; | |
2391 | ||
2392 | /* | |
2393 | * We'll remember the old online type of each memory block, so we can | |
2394 | * try to revert whatever we did when offlining one memory block fails | |
2395 | * after offlining some others succeeded. | |
2396 | */ | |
2397 | online_types = kmalloc_array(mb_count, sizeof(*online_types), | |
2398 | GFP_KERNEL); | |
2399 | if (!online_types) | |
2400 | return -ENOMEM; | |
2401 | /* | |
2402 | * Initialize all states to MMOP_OFFLINE, so when we abort processing in | |
2403 | * try_offline_memory_block(), we'll skip all unprocessed blocks in | |
2404 | * try_reonline_memory_block(). | |
2405 | */ | |
2406 | memset(online_types, MMOP_OFFLINE, mb_count); | |
08b3acd7 DH |
2407 | |
2408 | lock_device_hotplug(); | |
8dc4bb58 DH |
2409 | |
2410 | tmp = online_types; | |
2411 | rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block); | |
08b3acd7 DH |
2412 | |
2413 | /* | |
8dc4bb58 | 2414 | * In case we succeeded to offline all memory, remove it. |
08b3acd7 DH |
2415 | * This cannot fail as it cannot get onlined in the meantime. |
2416 | */ | |
2417 | if (!rc) { | |
e1c158e4 | 2418 | rc = try_remove_memory(start, size); |
8dc4bb58 DH |
2419 | if (rc) |
2420 | pr_err("%s: Failed to remove memory: %d\n", __func__, rc);
2421 | } | |
2422 | ||
2423 | /* | |
2424 | * Rollback what we did. While memory onlining might theoretically fail | |
2425 | * (nacked by a notifier), it barely ever happens. | |
2426 | */ | |
2427 | if (rc) { | |
2428 | tmp = online_types; | |
2429 | walk_memory_blocks(start, size, &tmp, | |
2430 | try_reonline_memory_block); | |
08b3acd7 DH |
2431 | } |
2432 | unlock_device_hotplug(); | |
2433 | ||
8dc4bb58 | 2434 | kfree(online_types); |
08b3acd7 DH |
2435 | return rc; |
2436 | } | |
2437 | EXPORT_SYMBOL_GPL(offline_and_remove_memory); | |
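/*
 * Illustrative sketch (not part of this file): a hypothetical driver
 * unplugging memory it added earlier. All memory blocks in the range
 * are offlined first; on full success the range is removed, otherwise
 * already-offlined blocks are brought back online.
 */
static void example_unplug(u64 start, u64 size)
{
	int rc = offline_and_remove_memory(start, size);

	if (rc)
		pr_warn("unplug of [%#llx-%#llx] failed: %d\n",
			start, start + size - 1, rc);
}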
aba6efc4 | 2438 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |