// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/compaction.h>
#include <linux/rmap.h>

#include <asm/tlbflush.h>

#include "internal.h"
#include "shuffle.h"


/*
 * memory_hotplug.memmap_on_memory parameter
 */
static bool memmap_on_memory __ro_after_init;
#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY
module_param(memmap_on_memory, bool, 0444);
MODULE_PARM_DESC(memmap_on_memory, "Enable memmap on memory for memory hotplug");
#endif

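/*
 * Example (illustrative sketch, not part of the original file): because this
 * file is built into the kernel, the parameter above is set on the kernel
 * command line with the "memory_hotplug." module-name prefix, e.g.:
 *
 *	memory_hotplug.memmap_on_memory=1
 */
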
/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() for callback registration
 * and restore_online_page_callback() for generic callback restore.
 */

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
int mhp_default_online_type = MMOP_OFFLINE;
#else
int mhp_default_online_type = MMOP_ONLINE;
#endif

static int __init setup_memhp_default_state(char *str)
{
	const int online_type = mhp_online_type_from_str(str);

	if (online_type >= 0)
		mhp_default_online_type = online_type;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

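/*
 * Example (illustrative sketch): the default online policy for newly added
 * memory blocks can be chosen at boot, e.g.:
 *
 *	memhp_default_state=online
 *	memhp_default_state=online_movable
 *
 * mhp_online_type_from_str() maps these strings to the MMOP_* values above.
 */
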
void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

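/*
 * Locking sketch (illustrative, not part of the original file): hotplug
 * paths bracket modifications with mem_hotplug_begin()/mem_hotplug_done()
 * (the writer side of mem_hotplug_lock), while code that must keep memory
 * from coming or going uses the reader-side pair above:
 *
 *	mem_hotplug_begin();
 *	// modify zones/sections here
 *	mem_hotplug_done();
 */
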
u64 max_mem_size = U64_MAX;

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size,
						 const char *resource_name)
{
	struct resource *res;
	unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	if (strcmp(resource_name, "System RAM"))
		flags |= IORESOURCE_SYSRAM_DRIVER_MANAGED;

	if (!mhp_range_allowed(start, size, true))
		return ERR_PTR(-E2BIG);

	/*
	 * Make sure the value parsed from 'mem=' only restricts memory adding
	 * while booting, so that memory hotplug won't be impacted. Please
	 * refer to the documentation of 'mem=' in kernel-parameters.txt for
	 * more details.
	 */
	if (start + size > max_mem_size && system_state < SYSTEM_RUNNING)
		return ERR_PTR(-E2BIG);

	/*
	 * Request ownership of the new memory range. This might be
	 * a child of an existing resource that was present but
	 * not marked as busy.
	 */
	res = __request_region(&iomem_resource, start, size,
			       resource_name, flags);

	if (!res) {
		pr_debug("Unable to reserve System RAM region: %016llx->%016llx\n",
			 start, start + size);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
		const char *reason)
{
	/*
	 * Disallow all operations smaller than a sub-section and only
	 * allow operations smaller than a section for
	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
	 * enforces a larger memory_block_size_bytes() granularity for
	 * memory that will be marked online, so this check should only
	 * fire for direct arch_{add,remove}_memory() users outside of
	 * add_memory_resource().
	 */
	unsigned long min_align;

	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		min_align = PAGES_PER_SUBSECTION;
	else
		min_align = PAGES_PER_SECTION;
	if (!IS_ALIGNED(pfn, min_align)
			|| !IS_ALIGNED(nr_pages, min_align)) {
		WARN(1, "Misaligned __%s_pages start: %#lx end: %#lx\n",
		     reason, pfn, pfn + nr_pages - 1);
		return -EINVAL;
	}
	return 0;
}

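/*
 * Worked example (assuming x86-64 with 4 KiB pages): with
 * CONFIG_SPARSEMEM_VMEMMAP, PAGES_PER_SUBSECTION is 512, so both pfn and
 * nr_pages must be multiples of 512 (2 MiB of memory); without VMEMMAP the
 * granularity is a full section, PAGES_PER_SECTION (32768, i.e. 128 MiB).
 */
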
/*
 * Returns the page for a valid pfn only if the page is online. All pfn
 * walkers which rely on the fully initialized page->flags and others
 * should use this rather than pfn_valid && pfn_to_page
 */
struct page *pfn_to_online_page(unsigned long pfn)
{
	unsigned long nr = pfn_to_section_nr(pfn);
	struct dev_pagemap *pgmap;
	struct mem_section *ms;

	if (nr >= NR_MEM_SECTIONS)
		return NULL;

	ms = __nr_to_section(nr);
	if (!online_section(ms))
		return NULL;

	/*
	 * Save some code text when online_section() +
	 * pfn_section_valid() are sufficient.
	 */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
		return NULL;

	if (!pfn_section_valid(ms, pfn))
		return NULL;

	if (!online_device_section(ms))
		return pfn_to_page(pfn);

	/*
	 * Slowpath: when ZONE_DEVICE collides with
	 * ZONE_{NORMAL,MOVABLE} within the same section some pfns in
	 * the section may be 'offline' but 'valid'. Only
	 * get_dev_pagemap() can determine sub-section online status.
	 */
	pgmap = get_dev_pagemap(pfn, NULL);
	put_dev_pagemap(pgmap);

	/* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
	if (pgmap)
		return NULL;

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(pfn_to_online_page);

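/*
 * Usage sketch (illustrative, not part of the original file): a typical pfn
 * walker that tolerates offline ranges and memory holes:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		struct page *page = pfn_to_online_page(pfn);
 *
 *		if (!page)
 *			continue;
 *		// page->flags and the zone linkage are fully initialized here
 *	}
 */
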
/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
		struct mhp_params *params)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	int err;
	struct vmem_altmap *altmap = params->altmap;

	if (WARN_ON_ONCE(!params->pgprot.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));

	if (altmap) {
		/*
		 * Validate altmap is within bounds of the total request
		 */
		if (altmap->base_pfn != pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			return -EINVAL;
		}
		altmap->alloc = 0;
	}

	err = check_pfn_span(pfn, nr_pages, "add");
	if (err)
		return err;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		err = sparse_add_section(nid, pfn, cur_nr_pages, altmap);
		if (err)
			break;
		cond_resched();
	}
	vmemmap_populate_print_last();
	return err;
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
					      unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(pfn)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long pfn;
	int nid = zone_to_nid(zone);

	if (zone->zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second smallest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn(zone));
		if (pfn) {
			zone->spanned_pages = zone_end_pfn(zone) - pfn;
			zone->zone_start_pfn = pfn;
		} else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	} else if (zone_end_pfn(zone) == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second biggest valid mem_section
		 * for shrinking the zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
		else {
			zone->zone_start_pfn = 0;
			zone->spanned_pages = 0;
		}
	}
}

static void update_pgdat_span(struct pglist_data *pgdat)
{
	unsigned long node_start_pfn = 0, node_end_pfn = 0;
	struct zone *zone;

	for (zone = pgdat->node_zones;
	     zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
		unsigned long end_pfn = zone_end_pfn(zone);

		/* No need to lock the zones, they can't change. */
		if (!zone->spanned_pages)
			continue;
		if (!node_end_pfn) {
			node_start_pfn = zone->zone_start_pfn;
			node_end_pfn = end_pfn;
			continue;
		}

		if (end_pfn > node_end_pfn)
			node_end_pfn = end_pfn;
		if (zone->zone_start_pfn < node_start_pfn)
			node_start_pfn = zone->zone_start_pfn;
	}

	pgdat->node_start_pfn = node_start_pfn;
	pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}

void __ref remove_pfn_range_from_zone(struct zone *zone,
				      unsigned long start_pfn,
				      unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long pfn, cur_nr_pages;

	/* Poison struct pages because they are now uninitialized again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();

		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages =
			min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
		page_init_poison(pfn_to_page(pfn),
				 sizeof(struct page) * cur_nr_pages);
	}

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
	 * we will not try to shrink the zones - which is okay as
	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
	 */
	if (zone_idx(zone) == ZONE_DEVICE)
		return;
#endif

	clear_zone_contiguous(zone);

	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	update_pgdat_span(pgdat);

	set_zone_contiguous(zone);
}

static void __remove_section(unsigned long pfn, unsigned long nr_pages,
			     unsigned long map_offset,
			     struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap);
}

/**
 * __remove_pages() - remove sections of pages
 * @pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
void __remove_pages(unsigned long pfn, unsigned long nr_pages,
		    struct vmem_altmap *altmap)
{
	const unsigned long end_pfn = pfn + nr_pages;
	unsigned long cur_nr_pages;
	unsigned long map_offset = 0;

	map_offset = vmem_altmap_offset(altmap);

	if (check_pfn_span(pfn, nr_pages, "remove"))
		return;

	for (; pfn < end_pfn; pfn += cur_nr_pages) {
		cond_resched();
		/* Select all remaining pages up to the next section boundary */
		cur_nr_pages = min(end_pfn - pfn,
				   SECTION_ALIGN_UP(pfn + 1) - pfn);
		__remove_section(pfn, cur_nr_pages, map_offset, altmap);
		map_offset = 0;
	}
}

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

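/*
 * Usage sketch (illustrative, not part of the original file): a ballooning
 * driver can intercept page onlining, as the Hyper-V balloon driver does,
 * and later restore the default. "my_online_page" is a hypothetical
 * callback with the online_page_callback_t signature:
 *
 *	set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
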
void generic_online_page(struct page *page, unsigned int order)
{
	/*
	 * Freeing the page with debug_pagealloc enabled will try to unmap it,
	 * so we should map it first. This is better than introducing a special
	 * case in page freeing fast path.
	 */
	debug_pagealloc_map_pages(page, 1 << order);
	__free_pages_core(page, order);
	totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(1UL << order);
#endif
}
EXPORT_SYMBOL_GPL(generic_online_page);

static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
{
	const unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	/*
	 * Online the pages in MAX_ORDER - 1 aligned chunks. The callback might
	 * decide to not expose all pages to the buddy (e.g., expose them
	 * later). We account all pages as being online and belonging to this
	 * zone ("present").
	 * When using memmap_on_memory, the range might not be aligned to
	 * MAX_ORDER_NR_PAGES - 1, but pageblock aligned. __ffs() will detect
	 * this and the first chunk to online will be pageblock_nr_pages.
	 */
	for (pfn = start_pfn; pfn < end_pfn;) {
		int order = min(MAX_ORDER - 1UL, __ffs(pfn));

		(*online_page_callback)(pfn_to_page(pfn), order);
		pfn += (1UL << order);
	}

	/* mark all involved sections as online */
	online_mem_sections(start_pfn, end_pfn);
}

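/*
 * Worked example (assuming MAX_ORDER - 1 == 10): for a range starting at
 * pfn 0x80200, __ffs(0x80200) == 9, so the first chunk onlined spans 2^9
 * pages; the next start (0x80400) is MAX_ORDER - 1 aligned, and onlining
 * continues in 2^10-page chunks from there.
 */
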
/* check which states of node_states will be changed when memory is onlined */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);

	arg->status_change_nid = NUMA_NO_NODE;
	arg->status_change_nid_normal = NUMA_NO_NODE;
	arg->status_change_nid_high = NUMA_NO_NODE;

	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
#ifdef CONFIG_HIGHMEM
	if (zone_idx(zone) <= ZONE_HIGHMEM && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
#endif
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	if (arg->status_change_nid >= 0)
		node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

static void section_taint_zone_device(unsigned long pfn)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	ms->section_mem_map |= SECTION_TAINT_ZONE_DEVICE;
}

/*
 * Associate the pfn range with the given zone, initializing the memmaps
 * and resizing the pgdat/zone data to span the added pages. After this
 * call, all affected pages are PG_reserved.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
				  unsigned long nr_pages,
				  struct vmem_altmap *altmap, int migratetype)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;

	clear_zone_contiguous(zone);

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);
	resize_zone_range(zone, start_pfn, nr_pages);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);

	/*
	 * Subsection population requires care in pfn_to_online_page().
	 * Set the taint to enable the slow path detection of
	 * ZONE_DEVICE pages in an otherwise ZONE_{NORMAL,MOVABLE}
	 * section.
	 */
	if (zone_is_zone_device(zone)) {
		if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn);
		if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
			section_taint_zone_device(start_pfn + nr_pages);
	}

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
			  MEMINIT_HOTPLUG, altmap, migratetype);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range it will automatically go
 * to the ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in a simple case where zones do not
	 * overlap in the given range
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone or two zones overlap in the
	 * given range then we use movable zone only if movable_node is
	 * enabled because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone *zone_for_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}

/*
 * This function should only be called by memory_block_{online,offline},
 * and {online,offline}_pages.
 */
void adjust_present_page_count(struct zone *zone, long nr_pages)
{
	zone->present_pages += nr_pages;
	zone->zone_pgdat->node_present_pages += nr_pages;
}

int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
			      struct zone *zone)
{
	unsigned long end_pfn = pfn + nr_pages;
	int ret;

	ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
	if (ret)
		return ret;

	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections online here as otherwise they will be
	 * left offline.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	return ret;
}

void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = pfn + nr_pages;

	/*
	 * It might be that the vmemmap_pages fully span sections. If that is
	 * the case, mark those sections offline here as otherwise they will be
	 * left online.
	 */
	if (nr_pages >= PAGES_PER_SECTION)
		offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));

	/*
	 * The pages associated with this vmemmap have been offlined, so
	 * we can reset its state here.
	 */
	remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
	kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}

int __ref online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone)
{
	unsigned long flags;
	int need_zonelists_rebuild = 0;
	const int nid = zone_to_nid(zone);
	int ret;
	struct memory_notify arg;

	/*
	 * {on,off}lining is constrained to full memory sections (or more
	 * precisely to memory blocks from the user space POV).
	 * memmap_on_memory is an exception because it reserves the initial
	 * part of the physical memory space for vmemmaps. That space is
	 * pageblock aligned.
	 */
	if (WARN_ON_ONCE(!nr_pages ||
			 !IS_ALIGNED(pfn, pageblock_nr_pages) ||
			 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
		return -EINVAL;

	mem_hotplug_begin();

	/* associate pfn range with the zone */
	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * Fixup the number of isolated pageblocks before marking the sections
	 * as online, such that undo_isolate_page_range() works correctly.
	 */
	spin_lock_irqsave(&zone->lock, flags);
	zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
	spin_unlock_irqrestore(&zone->lock, flags);

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	online_pages_range(pfn, nr_pages);
	adjust_present_page_count(zone, nr_pages);

	node_states_set_node(nid, &arg);
	if (need_zonelists_rebuild)
		build_all_zonelists(NULL);

	/* Basic onlining is complete, allow allocation of onlined pages. */
	undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);

	/*
	 * Freshly onlined pages aren't shuffled (e.g., all pages are placed to
	 * the tail of the freelist when undoing isolation). Shuffle the whole
	 * zone to make sure the just onlined pages are properly distributed
	 * across the whole freelist - to create an initial shuffle.
	 */
	shuffle_zone(zone);

	/* reinitialise watermarks and update pcp limits */
	init_per_zone_wmark_min();

	kswapd_run(nid);
	kcompactd_run(nid);

	writeback_set_ratelimit();

	memory_notify(MEM_ONLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	remove_pfn_range_from_zone(zone, pfn, nr_pages);
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid)
{
	struct pglist_data *pgdat;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
		arch_refresh_nodedata(nid, pgdat);
	} else {
		int cpu;
		/*
		 * Reset the nr_zones, order and highest_zoneidx before reuse.
		 * Note that kswapd will init kswapd_highest_zoneidx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_highest_zoneidx = 0;
		for_each_online_cpu(cpu) {
			struct per_cpu_nodestat *p;

			p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
			memset(p, 0, sizeof(*p));
		}
	}

	/* we can use NODE_DATA(nid) from here */
	pgdat->node_id = nid;
	pgdat->node_start_pfn = 0;

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_core_hotplug(nid);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_managed_pages(pgdat);
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);

	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}


/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 * @set_node_online: Whether we want to online the node
 *
 * Called by cpu_up() to online a node without onlined memory.
 *
 * Returns:
 * 1 -> a new node has been allocated
 * 0 -> the node is already online
 * -ENOMEM -> the node could not be allocated
 */
static int __try_online_node(int nid, bool set_node_online)
{
	pg_data_t *pgdat;
	int ret = 1;

	if (node_online(nid))
		return 0;

	pgdat = hotadd_new_pgdat(nid);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}

	if (set_node_online) {
		node_set_online(nid);
		ret = register_one_node(nid);
		BUG_ON(ret);
	}
out:
	return ret;
}

/*
 * Users of this function always want to online/register the node
 */
int try_online_node(int nid)
{
	int ret;

	mem_hotplug_begin();
	ret = __try_online_node(nid, true);
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	/* memory range must be block size aligned */
	if (!size || !IS_ALIGNED(start, memory_block_size_bytes()) ||
	    !IS_ALIGNED(size, memory_block_size_bytes())) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       memory_block_size_bytes(), start, size);
		return -EINVAL;
	}

	return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	mem->online_type = mhp_default_online_type;
	return device_online(&mem->dev);
}

bool mhp_supports_memmap_on_memory(unsigned long size)
{
	unsigned long nr_vmemmap_pages = size / PAGE_SIZE;
	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
	unsigned long remaining_size = size - vmemmap_size;

	/*
	 * Besides having arch support and the feature enabled at runtime, we
	 * need a few more assumptions to hold true:
	 *
	 * a) We span a single memory block: memory onlining/offlining happens
	 *    in memory block granularity. We don't want the vmemmap of online
	 *    memory blocks to reside on offline memory blocks. In the future,
	 *    we might want to support variable-sized memory blocks to make the
	 *    feature more versatile.
	 *
	 * b) The vmemmap pages span complete PMDs: We don't want vmemmap code
	 *    to populate memory from the altmap for unrelated parts (i.e.,
	 *    other memory blocks)
	 *
	 * c) The vmemmap pages (and thereby the pages that will be exposed to
	 *    the buddy) have to cover full pageblocks: memory onlining/offlining
	 *    code requires applicable ranges to be page-aligned, for example, to
	 *    set the migratetypes properly.
	 *
	 * TODO: Although we have a check here to make sure that vmemmap pages
	 *	 fully populate a PMD, it is not the right place to check for
	 *	 this. A much better solution involves improving vmemmap code
	 *	 to fallback to base pages when trying to populate vmemmap using
	 *	 altmap as an alternative source of memory, and we do not exactly
	 *	 populate a single PMD.
	 */
	return memmap_on_memory &&
	       !hugetlb_free_vmemmap_enabled &&
	       IS_ENABLED(CONFIG_MHP_MEMMAP_ON_MEMORY) &&
	       size == memory_block_size_bytes() &&
	       IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
	       IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
}

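/*
 * Worked example (assuming typical x86-64 values: 128 MiB memory blocks,
 * 4 KiB pages, 64-byte struct page, 2 MiB PMDs and pageblocks): a block has
 * 32768 pages, so vmemmap_size = 32768 * 64 = 2 MiB, which is PMD aligned,
 * and remaining_size = 126 MiB, which is pageblock aligned - so such a
 * block qualifies.
 */
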
/*
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations (triggered e.g. by sysfs).
 *
 * we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG
 */
int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
	struct vmem_altmap mhp_altmap = {};
	u64 start, size;
	bool new_node = false;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	if (!node_possible(nid)) {
		WARN(1, "node %d was absent from the node_possible_map\n", nid);
		return -EINVAL;
	}

	mem_hotplug_begin();

	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_add_node(start, size, nid);

	ret = __try_online_node(nid, false);
	if (ret < 0)
		goto error;
	new_node = ret;

	/*
	 * Self hosted memmap array
	 */
	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
		if (!mhp_supports_memmap_on_memory(size)) {
			ret = -EINVAL;
			goto error;
		}
		mhp_altmap.free = PHYS_PFN(size);
		mhp_altmap.base_pfn = PHYS_PFN(start);
		params.altmap = &mhp_altmap;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, &params);
	if (ret < 0)
		goto error;

	/* create memory block devices after memory was added */
	ret = create_memory_block_devices(start, size, mhp_altmap.alloc);
	if (ret) {
		arch_remove_memory(nid, start, size, NULL);
		goto error;
	}

	if (new_node) {
		/* If the sysfs file of the new node can't be created, CPUs on
		 * the node can't be hot-added. There is no rollback way now.
		 * So, check by BUG_ON() to catch it reluctantly..
		 * We online the node here. We can't roll back from here.
		 */
		node_set_online(nid);
		ret = __register_one_node(nid);
		BUG_ON(ret);
	}

	/* link memory sections under this node.*/
	link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1),
			  MEMINIT_HOTPLUG);

	/* create new memmap entry */
	if (!strcmp(res->name, "System RAM"))
		firmware_map_add_hotplug(start, start + size, "System RAM");

	/* device_online() will take the lock when calling online_pages() */
	mem_hotplug_done();

	/*
	 * In case we're allowed to merge the resource, flag it and trigger
	 * merging now that adding succeeded.
	 */
	if (mhp_flags & MHP_MERGE_RESOURCE)
		merge_system_ram_resource(res);

	/* online pages if requested */
	if (mhp_default_online_type != MMOP_OFFLINE)
		walk_memory_blocks(start, size, NULL, online_memory_block);

	return ret;
error:
	/* rollback pgdat allocation and others */
	if (new_node)
		rollback_node_hotadd(nid);
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		memblock_remove(start, size);
	mem_hotplug_done();
	return ret;
}

/* requires device_hotplug_lock, see add_memory_resource() */
int __ref __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size, "System RAM");
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, mhp_flags);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}

int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags)
{
	int rc;

	lock_device_hotplug();
	rc = __add_memory(nid, start, size, mhp_flags);
	unlock_device_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(add_memory);

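/*
 * Usage sketch (illustrative, hypothetical values): a hotplug driver that
 * discovered a new DIMM-like range could add it with:
 *
 *	rc = add_memory(nid, start, size, MHP_NONE);
 *	if (rc)
 *		pr_err("failed to add memory: %d\n", rc);
 */
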
/*
 * Add special, driver-managed memory to the system as system RAM. Such
 * memory is not exposed via the raw firmware-provided memmap as system
 * RAM, instead, it is detected and added by a driver - during cold boot,
 * after a reboot, and after kexec.
 *
 * Reasons why this memory should not be used for the initial memmap of a
 * kexec kernel or for placing kexec images:
 * - The booting kernel is in charge of determining how this memory will be
 *   used (e.g., use persistent memory as system RAM)
 * - Coordination with a hypervisor is required before this memory
 *   can be used (e.g., inaccessible parts).
 *
 * For this memory, no entries in /sys/firmware/memmap ("raw firmware-provided
 * memory map") are created. Also, the created memory resource is flagged
 * with IORESOURCE_SYSRAM_DRIVER_MANAGED, so in-kernel users can special-case
 * this memory as well (esp., not place kexec images onto it).
 *
 * The resource_name (visible via /proc/iomem) has to have the format
 * "System RAM ($DRIVER)".
 */
int add_memory_driver_managed(int nid, u64 start, u64 size,
			      const char *resource_name, mhp_t mhp_flags)
{
	struct resource *res;
	int rc;

	if (!resource_name ||
	    strstr(resource_name, "System RAM (") != resource_name ||
	    resource_name[strlen(resource_name) - 1] != ')')
		return -EINVAL;

	lock_device_hotplug();

	res = register_memory_resource(start, size, resource_name);
	if (IS_ERR(res)) {
		rc = PTR_ERR(res);
		goto out_unlock;
	}

	rc = add_memory_resource(nid, res, mhp_flags);
	if (rc < 0)
		release_memory_resource(res);

out_unlock:
	unlock_device_hotplug();
	return rc;
}
EXPORT_SYMBOL_GPL(add_memory_driver_managed);

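/*
 * Usage sketch (illustrative): the dax/kmem driver exposes persistent
 * memory this way, using a resource name of the required format:
 *
 *	rc = add_memory_driver_managed(nid, start, size,
 *				       "System RAM (kmem)", MHP_NONE);
 */
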
/*
 * Platforms should define arch_get_mappable_range() that provides
 * maximum possible addressable physical memory range for which the
 * linear mapping could be created. The platform returned address
 * range must adhere to these following semantics.
 *
 * - range.start <= range.end
 * - Range includes both end points [range.start..range.end]
 *
 * There is also a fallback definition provided here, allowing the
 * entire possible physical address range in case any platform does
 * not define arch_get_mappable_range().
 */
struct range __weak arch_get_mappable_range(void)
{
	struct range mhp_range = {
		.start = 0UL,
		.end = -1ULL,
	};
	return mhp_range;
}

struct range mhp_get_pluggable_range(bool need_mapping)
{
	const u64 max_phys = (1ULL << MAX_PHYSMEM_BITS) - 1;
	struct range mhp_range;

	if (need_mapping) {
		mhp_range = arch_get_mappable_range();
		if (mhp_range.start > max_phys) {
			mhp_range.start = 0;
			mhp_range.end = 0;
		}
		mhp_range.end = min_t(u64, mhp_range.end, max_phys);
	} else {
		mhp_range.start = 0;
		mhp_range.end = max_phys;
	}
	return mhp_range;
}
EXPORT_SYMBOL_GPL(mhp_get_pluggable_range);

bool mhp_range_allowed(u64 start, u64 size, bool need_mapping)
{
	struct range mhp_range = mhp_get_pluggable_range(need_mapping);
	u64 end = start + size;

	if (start < end && start >= mhp_range.start && (end - 1) <= mhp_range.end)
		return true;

	pr_warn("Hotplug memory [%#llx-%#llx] exceeds maximum addressable range [%#llx-%#llx]\n",
		start, end, mhp_range.start, mhp_range.end);
	return false;
}

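/*
 * Arch override sketch (illustrative, hypothetical constant): a platform
 * whose linear mapping covers only part of the physical address space would
 * provide something like:
 *
 *	struct range arch_get_mappable_range(void)
 *	{
 *		return (struct range) {
 *			.start = 0,
 *			.end   = LINEAR_MAP_PHYS_END,	// hypothetical limit
 *		};
 *	}
 */
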
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * Confirm all pages in a range [start, end) belong to the same zone (skipping
 * memory holes). When true, return the zone.
 */
struct zone *test_pages_in_a_zone(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	unsigned long pfn, sec_end_pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
			       !pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			/* Check if we got outside of the zone */
			if (zone && !zone_spans_pfn(zone, pfn + i))
				return NULL;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return NULL;
			zone = page_zone(page);
		}
	}

	return zone;
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). Will skip over most unmovable
 * pages (esp., pages that can be skipped when offlining), but bail out on
 * definitely unmovable pages.
 *
 * Returns:
 *	0 in case a movable page is found and movable_pfn was updated.
 *	-ENOENT in case no movable page was found.
 *	-EBUSY in case a definitely unmovable page was found.
 */
static int scan_movable_pages(unsigned long start, unsigned long end,
			      unsigned long *movable_pfn)
{
	unsigned long pfn;

	for (pfn = start; pfn < end; pfn++) {
		struct page *page, *head;
		unsigned long skip;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageLRU(page))
			goto found;
		if (__PageMovable(page))
			goto found;

		/*
		 * PageOffline() pages that are not marked __PageMovable() and
		 * have a reference count > 0 (after MEM_GOING_OFFLINE) are
		 * definitely unmovable. If their reference count would be 0,
		 * they could at least be skipped when offlining memory.
		 */
		if (PageOffline(page) && page_count(page))
			return -EBUSY;

		if (!PageHuge(page))
			continue;
		head = compound_head(page);
		/*
		 * This test is racy as we hold no reference or lock. The
		 * hugetlb page could have been freed and head is no longer
		 * a hugetlb page before the following check. In such unlikely
		 * cases false positives and negatives are possible. Calling
		 * code must deal with these scenarios.
		 */
		if (HPageMigratable(head))
			goto found;
		skip = compound_nr(head) - (page - head);
		pfn += skip - 1;
	}
	return -ENOENT;
found:
	*movable_pfn = pfn;
	return 0;
}

0c0e6195 KH |
1388 | static int |
1389 | do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |
1390 | { | |
1391 | unsigned long pfn; | |
6c357848 | 1392 | struct page *page, *head; |
0c0e6195 KH |
1393 | int ret = 0; |
1394 | LIST_HEAD(source); | |
786dee86 LM |
1395 | static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL, |
1396 | DEFAULT_RATELIMIT_BURST); | |
0c0e6195 | 1397 | |
a85009c3 | 1398 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
0c0e6195 KH |
1399 | if (!pfn_valid(pfn)) |
1400 | continue; | |
1401 | page = pfn_to_page(pfn); | |
6c357848 | 1402 | head = compound_head(page); |
c8721bbb NH |
1403 | |
1404 | if (PageHuge(page)) { | |
d8c6546b | 1405 | pfn = page_to_pfn(head) + compound_nr(head) - 1; |
daf3538a | 1406 | isolate_huge_page(head, &source); |
c8721bbb | 1407 | continue; |
94723aaf | 1408 | } else if (PageTransHuge(page)) |
6c357848 | 1409 | pfn = page_to_pfn(head) + thp_nr_pages(page) - 1; |
c8721bbb | 1410 | |
b15c8726 MH |
1411 | /* |
1412 | * HWPoison pages have elevated reference counts so the migration would | |
1413 | * fail on them. It also doesn't make any sense to migrate them in the | |
1414 | * first place. Still try to unmap such a page in case it is still mapped | |
1415 | * (e.g. the current hwpoison implementation doesn't unmap KSM pages, so
1416 | * we keep the unmap as a catch-all safety net).
1417 | */ | |
1418 | if (PageHWPoison(page)) { | |
1419 | if (WARN_ON(PageLRU(page))) | |
1420 | isolate_lru_page(page); | |
1421 | if (page_mapped(page)) | |
013339df | 1422 | try_to_unmap(page, TTU_IGNORE_MLOCK); |
b15c8726 MH |
1423 | continue; |
1424 | } | |
1425 | ||
700c2a46 | 1426 | if (!get_page_unless_zero(page)) |
0c0e6195 KH |
1427 | continue; |
1428 | /* | |
0efadf48 YX |
1429 | * We can skip free pages. And we can deal with pages on the
1430 | * LRU as well as non-lru movable pages.
0c0e6195 | 1431 | */ |
0efadf48 YX |
1432 | if (PageLRU(page)) |
1433 | ret = isolate_lru_page(page); | |
1434 | else | |
1435 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | |
0c0e6195 | 1436 | if (!ret) { /* Success */ |
62695a84 | 1437 | list_add_tail(&page->lru, &source); |
0efadf48 YX |
1438 | if (!__PageMovable(page)) |
1439 | inc_node_page_state(page, NR_ISOLATED_ANON + | |
9de4f22a | 1440 | page_is_file_lru(page)); |
6d9c285a | 1441 | |
0c0e6195 | 1442 | } else { |
786dee86 LM |
1443 | if (__ratelimit(&migrate_rs)) { |
1444 | pr_warn("failed to isolate pfn %lx\n", pfn); | |
1445 | dump_page(page, "isolation failed"); | |
1446 | } | |
0c0e6195 | 1447 | } |
1723058e | 1448 | put_page(page); |
0c0e6195 | 1449 | } |
f3ab2636 | 1450 | if (!list_empty(&source)) { |
203e6e5c JK |
1451 | nodemask_t nmask = node_states[N_MEMORY]; |
1452 | struct migration_target_control mtc = { | |
1453 | .nmask = &nmask, | |
1454 | .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, | |
1455 | }; | |
1456 | ||
1457 | /* | |
1458 | * We have checked that the migration range is within a single zone, so
1459 | * we can use the nid of the first page for all the others.
1460 | */ | |
1461 | mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); | |
1462 | ||
1463 | /* | |
1464 | * Try to allocate from a different node, but reuse this node
1465 | * if there are no other online nodes to be used (e.g. we are
1466 | * offlining part of the only existing node).
1467 | */ | |
1468 | node_clear(mtc.nid, nmask); | |
1469 | if (nodes_empty(nmask)) | |
1470 | node_set(mtc.nid, nmask); | |
1471 | ret = migrate_pages(&source, alloc_migration_target, NULL, | |
1472 | (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | |
2932c8b0 MH |
1473 | if (ret) { |
1474 | list_for_each_entry(page, &source, lru) { | |
786dee86 LM |
1475 | if (__ratelimit(&migrate_rs)) { |
1476 | pr_warn("migrating pfn %lx failed ret:%d\n", | |
1477 | page_to_pfn(page), ret); | |
1478 | dump_page(page, "migration failure"); | |
1479 | } | |
2932c8b0 | 1480 | } |
c8721bbb | 1481 | putback_movable_pages(&source); |
2932c8b0 | 1482 | } |
0c0e6195 | 1483 | } |
1723058e | 1484 | |
0c0e6195 KH |
1485 | return ret; |
1486 | } | |
1487 | ||
c5320926 TC |
1488 | static int __init cmdline_parse_movable_node(char *p) |
1489 | { | |
55ac590c | 1490 | movable_node_enabled = true; |
c5320926 TC |
1491 | return 0; |
1492 | } | |
1493 | early_param("movable_node", cmdline_parse_movable_node); | |
1494 | ||
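early_param() runs its handler during early command-line parsing, well before the memory subsystem is initialized, which is why a plain boolean latch is all the handler above needs. The same pattern, sketched for a hypothetical flag (demo_flag is not a real kernel parameter):

static bool demo_flag __initdata;

static int __init cmdline_parse_demo_flag(char *p)
{
	/* Mere presence of the flag on the command line enables it. */
	demo_flag = true;
	return 0;
}
early_param("demo_flag", cmdline_parse_demo_flag);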
d9713679 LJ |
1495 | /* check which states of node_states will be changed when offlining memory */
1496 | static void node_states_check_changes_offline(unsigned long nr_pages, | |
1497 | struct zone *zone, struct memory_notify *arg) | |
1498 | { | |
1499 | struct pglist_data *pgdat = zone->zone_pgdat; | |
1500 | unsigned long present_pages = 0; | |
86b27bea | 1501 | enum zone_type zt; |
d9713679 | 1502 | |
98fa15f3 AK |
1503 | arg->status_change_nid = NUMA_NO_NODE; |
1504 | arg->status_change_nid_normal = NUMA_NO_NODE; | |
1505 | arg->status_change_nid_high = NUMA_NO_NODE; | |
d9713679 LJ |
1506 | |
1507 | /* | |
86b27bea OS |
1508 | * Check whether node_states[N_NORMAL_MEMORY] will be changed. |
1509 | * If the memory to be offlined is within the range
1510 | * [0..ZONE_NORMAL], and it is the last present memory there, | |
1511 | * the zones in that range will become empty after the offlining, | |
1512 | * thus we can determine that we need to clear the node from | |
1513 | * node_states[N_NORMAL_MEMORY]. | |
d9713679 | 1514 | */ |
86b27bea | 1515 | for (zt = 0; zt <= ZONE_NORMAL; zt++) |
d9713679 | 1516 | present_pages += pgdat->node_zones[zt].present_pages; |
86b27bea | 1517 | if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) |
d9713679 | 1518 | arg->status_change_nid_normal = zone_to_nid(zone); |
d9713679 | 1519 | |
6715ddf9 LJ |
1520 | #ifdef CONFIG_HIGHMEM |
1521 | /* | |
86b27bea OS |
1522 | * node_states[N_HIGH_MEMORY] contains nodes which |
1523 | * have normal memory or high memory. | |
1524 | * Here we add the present_pages belonging to ZONE_HIGHMEM. | |
1525 | * If the zone is within the range of [0..ZONE_HIGHMEM), and | |
1526 | * we determine that the zones in that range become empty, | |
1527 | * we need to clear the node for N_HIGH_MEMORY. | |
6715ddf9 | 1528 | */ |
86b27bea OS |
1529 | present_pages += pgdat->node_zones[ZONE_HIGHMEM].present_pages; |
1530 | if (zone_idx(zone) <= ZONE_HIGHMEM && nr_pages >= present_pages) | |
6715ddf9 | 1531 | arg->status_change_nid_high = zone_to_nid(zone); |
6715ddf9 LJ |
1532 | #endif |
1533 | ||
d9713679 | 1534 | /* |
86b27bea OS |
1535 | * We have accounted the pages from [0..ZONE_NORMAL), and |
1536 | * in case of CONFIG_HIGHMEM the pages from ZONE_HIGHMEM | |
1537 | * as well. | |
1538 | * Here we count the possible pages from ZONE_MOVABLE. | |
1539 | * If, after having accounted for all the pages, we see that the nr_pages
1540 | * to be offlined is greater than or equal to the accounted pages,
1541 | * we know that the node will become empty, and so, we can clear | |
1542 | * it for N_MEMORY as well. | |
d9713679 | 1543 | */ |
86b27bea | 1544 | present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages; |
d9713679 | 1545 | |
d9713679 LJ |
1546 | if (nr_pages >= present_pages) |
1547 | arg->status_change_nid = zone_to_nid(zone); | |
d9713679 LJ |
1548 | } |
1549 | ||
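A worked example with hypothetical numbers (assuming no CONFIG_HIGHMEM): a node with 0x20000 present pages in ZONE_DMA..ZONE_NORMAL and 0x10000 in ZONE_MOVABLE, offlining nr_pages = 0x20000 from ZONE_NORMAL. The first check sees nr_pages >= 0x20000, so status_change_nid_normal is set: the node is about to lose all of its normal memory. The final check compares against the 0x30000 total, which nr_pages does not reach, so status_change_nid stays NUMA_NO_NODE: the movable memory keeps the node in N_MEMORY.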
1550 | static void node_states_clear_node(int node, struct memory_notify *arg) | |
1551 | { | |
1552 | if (arg->status_change_nid_normal >= 0) | |
1553 | node_clear_state(node, N_NORMAL_MEMORY); | |
1554 | ||
cf01f6f5 | 1555 | if (arg->status_change_nid_high >= 0) |
d9713679 | 1556 | node_clear_state(node, N_HIGH_MEMORY); |
6715ddf9 | 1557 | |
cf01f6f5 | 1558 | if (arg->status_change_nid >= 0) |
6715ddf9 | 1559 | node_clear_state(node, N_MEMORY); |
d9713679 LJ |
1560 | } |
1561 | ||
c5e79ef5 DH |
1562 | static int count_system_ram_pages_cb(unsigned long start_pfn, |
1563 | unsigned long nr_pages, void *data) | |
1564 | { | |
1565 | unsigned long *nr_system_ram_pages = data; | |
1566 | ||
1567 | *nr_system_ram_pages += nr_pages; | |
1568 | return 0; | |
1569 | } | |
1570 | ||
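This callback exists purely so walk_system_ram_range() can total the System RAM pages in a range; comparing the total against the requested length detects holes. A hedged sketch of that check (range_is_hole_free() is an illustrative name; offline_pages() below inlines the same logic):

static bool range_is_hole_free(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long present = 0;

	/* Only System RAM resources are walked; holes contribute nothing. */
	walk_system_ram_range(start_pfn, nr_pages, &present,
			      count_system_ram_pages_cb);
	return present == nr_pages;
}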
73a11c96 | 1571 | int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages) |
0c0e6195 | 1572 | { |
73a11c96 | 1573 | const unsigned long end_pfn = start_pfn + nr_pages; |
0a1a9a00 | 1574 | unsigned long pfn, system_ram_pages = 0; |
d702909f | 1575 | unsigned long flags; |
0c0e6195 | 1576 | struct zone *zone; |
7b78d335 | 1577 | struct memory_notify arg; |
ea15153c | 1578 | int ret, node; |
79605093 | 1579 | char *reason; |
0c0e6195 | 1580 | |
dd8e2f23 OS |
1581 | /* |
1582 | * {on,off}lining is constrained to full memory sections (or more | |
041711ce | 1583 | * precisely to memory blocks from the user space POV). |
dd8e2f23 OS |
1584 | * memmap_on_memory is an exception because it reserves the initial part
1585 | * of the physical memory space for vmemmaps. That space is pageblock | |
1586 | * aligned. | |
1587 | */ | |
4986fac1 | 1588 | if (WARN_ON_ONCE(!nr_pages || |
dd8e2f23 OS |
1589 | !IS_ALIGNED(start_pfn, pageblock_nr_pages) || |
1590 | !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) | |
4986fac1 DH |
1591 | return -EINVAL; |
1592 | ||
381eab4a DH |
1593 | mem_hotplug_begin(); |
1594 | ||
c5e79ef5 DH |
1595 | /* |
1596 | * Don't allow offlining memory blocks that contain holes.
1597 | * Consequently, memory blocks with holes can never get onlined | |
1598 | * via the hotplug path - online_pages() - as hotplugged memory has | |
1599 | * no holes. This way, we e.g., don't have to worry about marking | |
1600 | * memory holes PG_reserved, don't need pfn_valid() checks, and can | |
1601 | * avoid using walk_system_ram_range() later. | |
1602 | */ | |
73a11c96 | 1603 | walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, |
c5e79ef5 | 1604 | count_system_ram_pages_cb); |
73a11c96 | 1605 | if (system_ram_pages != nr_pages) { |
c5e79ef5 DH |
1606 | ret = -EINVAL; |
1607 | reason = "memory holes"; | |
1608 | goto failed_removal; | |
1609 | } | |
1610 | ||
0c0e6195 KH |
1611 | /* The range must lie within a single zone; we assume this for now, as
1612 | it makes hotplug much easier (and the code more readable). */
92917998 DH |
1613 | zone = test_pages_in_a_zone(start_pfn, end_pfn); |
1614 | if (!zone) { | |
79605093 MH |
1615 | ret = -EINVAL; |
1616 | reason = "multizone range"; | |
1617 | goto failed_removal; | |
381eab4a | 1618 | } |
7b78d335 | 1619 | node = zone_to_nid(zone); |
7b78d335 | 1620 | |
ec6e8c7e VB |
1621 | /* |
1622 | * Disable pcplists so that page isolation cannot race with freeing | |
1623 | * in a way that pages from the isolated pageblock are left on pcplists.
1624 | */ | |
1625 | zone_pcp_disable(zone); | |
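	/*
	 * Likewise, disable (and drain) the per-CPU LRU pagevecs, so pages
	 * cannot linger there and evade the isolation below.
	 */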
d479960e | 1626 | lru_cache_disable(); |
ec6e8c7e | 1627 | |
0c0e6195 | 1628 | /* set above range as isolated */ |
b023f468 | 1629 | ret = start_isolate_page_range(start_pfn, end_pfn, |
d381c547 | 1630 | MIGRATE_MOVABLE, |
756d25be | 1631 | MEMORY_OFFLINE | REPORT_FAILURE); |
3fa0c7c7 | 1632 | if (ret) { |
79605093 | 1633 | reason = "failure to isolate range"; |
ec6e8c7e | 1634 | goto failed_removal_pcplists_disabled; |
381eab4a | 1635 | } |
7b78d335 YG |
1636 | |
1637 | arg.start_pfn = start_pfn; | |
1638 | arg.nr_pages = nr_pages; | |
d9713679 | 1639 | node_states_check_changes_offline(nr_pages, zone, &arg); |
7b78d335 YG |
1640 | |
1641 | ret = memory_notify(MEM_GOING_OFFLINE, &arg); | |
1642 | ret = notifier_to_errno(ret); | |
79605093 MH |
1643 | if (ret) { |
1644 | reason = "notifier failure"; | |
1645 | goto failed_removal_isolated; | |
1646 | } | |
7b78d335 | 1647 | |
bb8965bd | 1648 | do { |
aa218795 DH |
1649 | pfn = start_pfn; |
1650 | do { | |
bb8965bd MH |
1651 | if (signal_pending(current)) { |
1652 | ret = -EINTR; | |
1653 | reason = "signal backoff"; | |
1654 | goto failed_removal_isolated; | |
1655 | } | |
72b39cfc | 1656 | |
bb8965bd | 1657 | cond_resched(); |
bb8965bd | 1658 | |
aa218795 DH |
1659 | ret = scan_movable_pages(pfn, end_pfn, &pfn); |
1660 | if (!ret) { | |
bb8965bd MH |
1661 | /* |
1662 | * TODO: fatal migration failures should bail | |
1663 | * out | |
1664 | */ | |
1665 | do_migrate_range(pfn, end_pfn); | |
1666 | } | |
aa218795 DH |
1667 | } while (!ret); |
1668 | ||
1669 | if (ret != -ENOENT) { | |
1670 | reason = "unmovable page"; | |
1671 | goto failed_removal_isolated; | |
bb8965bd | 1672 | } |
0c0e6195 | 1673 | |
bb8965bd MH |
1674 | /* |
1675 | * Dissolve free hugepages in the memory block before actually
1676 | * offlining it, in order to keep hugetlbfs's object counting
1677 | * consistent.
1678 | */ | |
1679 | ret = dissolve_free_huge_pages(start_pfn, end_pfn); | |
1680 | if (ret) { | |
1681 | reason = "failure to dissolve huge pages"; | |
1682 | goto failed_removal_isolated; | |
1683 | } | |
0a1a9a00 | 1684 | |
0a1a9a00 | 1685 | ret = test_pages_isolated(start_pfn, end_pfn, MEMORY_OFFLINE); |
ec6e8c7e | 1686 | |
5557c766 | 1687 | } while (ret); |
72b39cfc | 1688 | |
0a1a9a00 DH |
1689 | /* Mark all sections offline and remove free pages from the buddy. */ |
1690 | __offline_isolated_pages(start_pfn, end_pfn); | |
7c33023a | 1691 | pr_debug("Offlined Pages %ld\n", nr_pages); |
0a1a9a00 | 1692 | |
9b7ea46a | 1693 | /* |
b30c5927 DH |
1694 | * The memory sections are marked offline, and the pageblock flags are
1695 | * effectively stale; nobody should be touching them. Fix up the number
1696 | * of isolated pageblocks, memory onlining will properly revert this. | |
9b7ea46a QC |
1697 | */ |
1698 | spin_lock_irqsave(&zone->lock, flags); | |
ea15153c | 1699 | zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; |
9b7ea46a QC |
1700 | spin_unlock_irqrestore(&zone->lock, flags); |
1701 | ||
d479960e | 1702 | lru_cache_enable(); |
ec6e8c7e VB |
1703 | zone_pcp_enable(zone); |
1704 | ||
0c0e6195 | 1705 | /* removal success */ |
0a1a9a00 | 1706 | adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); |
f9901144 | 1707 | adjust_present_page_count(zone, -nr_pages); |
7b78d335 | 1708 | |
b92ca18e | 1709 | /* reinitialise watermarks and update pcp limits */ |
1b79acc9 KM |
1710 | init_per_zone_wmark_min(); |
1711 | ||
1e8537ba | 1712 | if (!populated_zone(zone)) { |
340175b7 | 1713 | zone_pcp_reset(zone); |
72675e13 | 1714 | build_all_zonelists(NULL); |
b92ca18e | 1715 | } |
340175b7 | 1716 | |
d9713679 | 1717 | node_states_clear_node(node, &arg); |
698b1b30 | 1718 | if (arg.status_change_nid >= 0) { |
8fe23e05 | 1719 | kswapd_stop(node); |
698b1b30 VB |
1720 | kcompactd_stop(node); |
1721 | } | |
bce7394a | 1722 | |
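	/*
	 * The writeback ratelimit is sized against the amount of available
	 * memory; recompute it now that this range is gone.
	 */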
0c0e6195 | 1723 | writeback_set_ratelimit(); |
7b78d335 YG |
1724 | |
1725 | memory_notify(MEM_OFFLINE, &arg); | |
feee6b29 | 1726 | remove_pfn_range_from_zone(zone, start_pfn, nr_pages); |
381eab4a | 1727 | mem_hotplug_done(); |
0c0e6195 KH |
1728 | return 0; |
1729 | ||
79605093 MH |
1730 | failed_removal_isolated: |
1731 | undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); | |
c4efe484 | 1732 | memory_notify(MEM_CANCEL_OFFLINE, &arg); |
ec6e8c7e VB |
1733 | failed_removal_pcplists_disabled: |
1734 | zone_pcp_enable(zone); | |
0c0e6195 | 1735 | failed_removal: |
79605093 | 1736 | pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n", |
e33e33b4 | 1737 | (unsigned long long) start_pfn << PAGE_SHIFT, |
79605093 MH |
1738 | ((unsigned long long) end_pfn << PAGE_SHIFT) - 1, |
1739 | reason); | |
0c0e6195 | 1740 | /* pushback to free area */ |
381eab4a | 1741 | mem_hotplug_done(); |
0c0e6195 KH |
1742 | return ret; |
1743 | } | |
71088785 | 1744 | |
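For orientation, a hedged sketch of the expected call shape: the memory block device layer (drivers/base/memory.c) calls offline_pages() on exactly one memory block at a time, under the device hotplug lock. demo_offline_block() is illustrative, not the real callback:

static int demo_offline_block(struct memory_block *mem)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PFN_DOWN(memory_block_size_bytes());

	/* A whole block satisfies the alignment checks at the top. */
	return offline_pages(start_pfn, nr_pages);
}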
d6de9d53 | 1745 | static int check_memblock_offlined_cb(struct memory_block *mem, void *arg) |
bbc76be6 WC |
1746 | { |
1747 | int ret = !is_memblock_offlined(mem); | |
1748 | ||
349daa0f RD |
1749 | if (unlikely(ret)) { |
1750 | phys_addr_t beginpa, endpa; | |
1751 | ||
1752 | beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)); | |
b6c88d3b | 1753 | endpa = beginpa + memory_block_size_bytes() - 1; |
756a025f | 1754 | pr_warn("removing memory failed, because memory [%pa-%pa] is online\n",
349daa0f | 1755 | &beginpa, &endpa); |
bbc76be6 | 1756 | |
eca499ab PT |
1757 | return -EBUSY; |
1758 | } | |
1759 | return 0; | |
bbc76be6 WC |
1760 | } |
1761 | ||
a08a2ae3 OS |
1762 | static int get_nr_vmemmap_pages_cb(struct memory_block *mem, void *arg) |
1763 | { | |
1764 | /* | |
1765 | * If the block has no vmemmap pages (0), the walk continues with the next block.
1766 | */ | |
1767 | return mem->nr_vmemmap_pages; | |
1768 | } | |
1769 | ||
0f1cfe9d | 1770 | static int check_cpu_on_node(pg_data_t *pgdat) |
60a5a19e | 1771 | { |
60a5a19e TC |
1772 | int cpu; |
1773 | ||
1774 | for_each_present_cpu(cpu) { | |
1775 | if (cpu_to_node(cpu) == pgdat->node_id) | |
1776 | /* | |
1777 | * A CPU on this node has not been removed, so we
1778 | * cannot offline this node.
1779 | */ | |
1780 | return -EBUSY; | |
1781 | } | |
1782 | ||
1783 | return 0; | |
1784 | } | |
1785 | ||
2c91f8fc DH |
1786 | static int check_no_memblock_for_node_cb(struct memory_block *mem, void *arg) |
1787 | { | |
1788 | int nid = *(int *)arg; | |
1789 | ||
1790 | /* | |
1791 | * If a memory block belongs to multiple nodes, the stored nid is not | |
1792 | * reliable. However, such blocks are always online (e.g., cannot get | |
1793 | * offlined) and, therefore, are still spanned by the node. | |
1794 | */ | |
1795 | return mem->nid == nid ? -EEXIST : 0; | |
1796 | } | |
1797 | ||
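-EEXIST serves only as a non-zero sentinel here: for_each_memory_block() stops at the first non-zero callback return and hands it back. A hedged sketch of the resulting idiom (demo_node_has_memory_blocks() is an illustrative name):

static bool demo_node_has_memory_blocks(int nid)
{
	/* A non-zero (-EEXIST) result means a block for 'nid' was found. */
	return for_each_memory_block(&nid, check_no_memblock_for_node_cb) == -EEXIST;
}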
0f1cfe9d TK |
1798 | /** |
1799 | * try_offline_node | |
e8b098fc | 1800 | * @nid: the node ID |
0f1cfe9d TK |
1801 | * |
1802 | * Offline a node if all memory sections and cpus of the node are removed. | |
1803 | * | |
1804 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
1805 | * and online/offline operations before this call. | |
1806 | */ | |
90b30cdc | 1807 | void try_offline_node(int nid) |
60a5a19e | 1808 | { |
d822b86a | 1809 | pg_data_t *pgdat = NODE_DATA(nid); |
2c91f8fc | 1810 | int rc; |
60a5a19e | 1811 | |
2c91f8fc DH |
1812 | /* |
1813 | * If the node still spans pages (especially ZONE_DEVICE), don't | |
1814 | * offline it. A node spans memory after move_pfn_range_to_zone(), | |
1815 | * e.g., after the memory block was onlined. | |
1816 | */ | |
1817 | if (pgdat->node_spanned_pages) | |
1818 | return; | |
60a5a19e | 1819 | |
2c91f8fc DH |
1820 | /* |
1821 | * In particular, offline memory blocks might not be spanned by the
1822 | * node; they only get spanned once they are onlined. However, they
1823 | * still link to the node in sysfs and can get onlined later.
1824 | */ | |
1825 | rc = for_each_memory_block(&nid, check_no_memblock_for_node_cb); | |
1826 | if (rc) | |
60a5a19e | 1827 | return; |
60a5a19e | 1828 | |
46a3679b | 1829 | if (check_cpu_on_node(pgdat)) |
60a5a19e TC |
1830 | return; |
1831 | ||
1832 | /* | |
1833 | * All memory and CPUs of this node have been removed; we can
1834 | * offline this node now.
1835 | */ | |
1836 | node_set_offline(nid); | |
1837 | unregister_one_node(nid); | |
1838 | } | |
90b30cdc | 1839 | EXPORT_SYMBOL(try_offline_node); |
60a5a19e | 1840 | |
eca499ab | 1841 | static int __ref try_remove_memory(int nid, u64 start, u64 size) |
bbc76be6 | 1842 | { |
eca499ab | 1843 | int rc = 0; |
a08a2ae3 OS |
1844 | struct vmem_altmap mhp_altmap = {}; |
1845 | struct vmem_altmap *altmap = NULL; | |
1846 | unsigned long nr_vmemmap_pages; | |
993c1aad | 1847 | |
27356f54 TK |
1848 | BUG_ON(check_hotplug_memory_range(start, size)); |
1849 | ||
6677e3ea | 1850 | /* |
242831eb | 1851 | * All memory blocks must be offlined before removing memory. Check |
eca499ab | 1852 | * whether all memory blocks in question are offline and return error |
242831eb | 1853 | * if this is not the case. |
6677e3ea | 1854 | */ |
fbcf73ce | 1855 | rc = walk_memory_blocks(start, size, NULL, check_memblock_offlined_cb); |
eca499ab | 1856 | if (rc) |
b4223a51 | 1857 | return rc; |
6677e3ea | 1858 | |
a08a2ae3 OS |
1859 | /* |
1860 | * We only support removing memory added with MHP_MEMMAP_ON_MEMORY at
1861 | * the same granularity it was added - a single memory block.
1862 | */ | |
1863 | if (memmap_on_memory) { | |
1864 | nr_vmemmap_pages = walk_memory_blocks(start, size, NULL, | |
1865 | get_nr_vmemmap_pages_cb); | |
1866 | if (nr_vmemmap_pages) { | |
1867 | if (size != memory_block_size_bytes()) { | |
1868 | pr_warn("Refuse to remove %#llx - %#llx," | |
1869 | "wrong granularity\n", | |
1870 | start, start + size); | |
1871 | return -EINVAL; | |
1872 | } | |
1873 | ||
1874 | /* | |
1875 | * Let remove_pmd_table->free_hugepage_table do the | |
1876 | * right thing if we used vmem_altmap when hot-adding | |
1877 | * the range. | |
1878 | */ | |
1879 | mhp_altmap.alloc = nr_vmemmap_pages; | |
1880 | altmap = &mhp_altmap; | |
1881 | } | |
1882 | } | |
1883 | ||
46c66c4b YI |
1884 | /* remove memmap entry */ |
1885 | firmware_map_remove(start, start + size, "System RAM"); | |
4c4b7f9b | 1886 | |
f1037ec0 DW |
1887 | /* |
1888 | * Memory block device removal under the device_hotplug_lock is | |
1889 | * a barrier against racing online attempts. | |
1890 | */ | |
4c4b7f9b | 1891 | remove_memory_block_devices(start, size); |
46c66c4b | 1892 | |
f1037ec0 DW |
1893 | mem_hotplug_begin(); |
1894 | ||
a08a2ae3 | 1895 | arch_remove_memory(nid, start, size, altmap); |
52219aea DH |
1896 | |
1897 | if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) { | |
1898 | memblock_free(start, size); | |
1899 | memblock_remove(start, size); | |
1900 | } | |
1901 | ||
cb8e3c8b | 1902 | release_mem_region_adjustable(start, size); |
24d335ca | 1903 | |
60a5a19e TC |
1904 | try_offline_node(nid); |
1905 | ||
bfc8c901 | 1906 | mem_hotplug_done(); |
b4223a51 | 1907 | return 0; |
71088785 | 1908 | } |
d15e5926 | 1909 | |
eca499ab PT |
1910 | /** |
1911 | * remove_memory | |
1912 | * @nid: the node ID | |
1913 | * @start: physical address of the region to remove | |
1914 | * @size: size of the region to remove | |
1915 | * | |
1916 | * NOTE: The caller must call lock_device_hotplug() to serialize hotplug | |
1917 | * and online/offline operations before this call, as required by | |
1918 | * try_offline_node(). | |
1919 | */ | |
1920 | void __remove_memory(int nid, u64 start, u64 size) | |
1921 | { | |
1922 | ||
1923 | /* | |
29a90db9 | 1924 | * Trigger BUG() if some memory was not offlined prior to calling this
eca499ab PT |
1925 | * function |
1926 | */ | |
1927 | if (try_remove_memory(nid, start, size)) | |
1928 | BUG(); | |
1929 | } | |
1930 | ||
1931 | /* | |
1932 | * Remove memory if every memory block is offline; otherwise, return -EBUSY if
1933 | * some memory is not offline.
1934 | */ | |
1935 | int remove_memory(int nid, u64 start, u64 size) | |
d15e5926 | 1936 | { |
eca499ab PT |
1937 | int rc; |
1938 | ||
d15e5926 | 1939 | lock_device_hotplug(); |
eca499ab | 1940 | rc = try_remove_memory(nid, start, size); |
d15e5926 | 1941 | unlock_device_hotplug(); |
eca499ab PT |
1942 | |
1943 | return rc; | |
d15e5926 | 1944 | } |
71088785 | 1945 | EXPORT_SYMBOL_GPL(remove_memory); |
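A hedged usage sketch: a driver that hot-added a single memory block and has already offlined it can tear it down as below; demo_teardown() is illustrative. remove_memory() fails while any memory block in the range is still online.

static void demo_teardown(int nid, u64 start)
{
	if (remove_memory(nid, start, memory_block_size_bytes()))
		pr_warn("block at %#llx still online, not removed\n",
			(unsigned long long)start);
}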
08b3acd7 | 1946 | |
8dc4bb58 DH |
1947 | static int try_offline_memory_block(struct memory_block *mem, void *arg) |
1948 | { | |
1949 | uint8_t online_type = MMOP_ONLINE_KERNEL; | |
1950 | uint8_t **online_types = arg; | |
1951 | struct page *page; | |
1952 | int rc; | |
1953 | ||
1954 | /* | |
1955 | * Sense the online_type via the zone of the memory block. Offlining | |
1956 | * with multiple zones within one memory block will be rejected | |
1957 | * by offlining code ... so we don't care about that. | |
1958 | */ | |
1959 | page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr)); | |
1960 | if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE) | |
1961 | online_type = MMOP_ONLINE_MOVABLE; | |
1962 | ||
1963 | rc = device_offline(&mem->dev); | |
1964 | /* | |
1965 | * Default is MMOP_OFFLINE - change it only if offlining succeeded, | |
1966 | * so try_reonline_memory_block() can do the right thing. | |
1967 | */ | |
1968 | if (!rc) | |
1969 | **online_types = online_type; | |
1970 | ||
1971 | (*online_types)++; | |
1972 | /* Ignore if already offline. */ | |
1973 | return rc < 0 ? rc : 0; | |
1974 | } | |
1975 | ||
1976 | static int try_reonline_memory_block(struct memory_block *mem, void *arg) | |
1977 | { | |
1978 | uint8_t **online_types = arg; | |
1979 | int rc; | |
1980 | ||
1981 | if (**online_types != MMOP_OFFLINE) { | |
1982 | mem->online_type = **online_types; | |
1983 | rc = device_online(&mem->dev); | |
1984 | if (rc < 0) | |
1985 | pr_warn("%s: Failed to re-online memory: %d", | |
1986 | __func__, rc); | |
1987 | } | |
1988 | ||
1989 | /* Continue processing all remaining memory blocks. */ | |
1990 | (*online_types)++; | |
1991 | return 0; | |
1992 | } | |
1993 | ||
08b3acd7 | 1994 | /* |
8dc4bb58 DH |
1995 | * Try to offline and remove memory. Might take a long time to finish in case |
1996 | * memory is still in use. Primarily useful for memory devices that logically | |
1997 | * unplugged all memory (so it's no longer in use) and want to offline + remove | |
1998 | * that memory. | |
08b3acd7 DH |
1999 | */ |
2000 | int offline_and_remove_memory(int nid, u64 start, u64 size) | |
2001 | { | |
8dc4bb58 DH |
2002 | const unsigned long mb_count = size / memory_block_size_bytes(); |
2003 | uint8_t *online_types, *tmp; | |
2004 | int rc; | |
08b3acd7 DH |
2005 | |
2006 | if (!IS_ALIGNED(start, memory_block_size_bytes()) || | |
8dc4bb58 DH |
2007 | !IS_ALIGNED(size, memory_block_size_bytes()) || !size) |
2008 | return -EINVAL; | |
2009 | ||
2010 | /* | |
2011 | * We'll remember the old online type of each memory block, so we can | |
2012 | * try to revert whatever we did when offlining one memory block fails | |
2013 | * after offlining some others succeeded. | |
2014 | */ | |
2015 | online_types = kmalloc_array(mb_count, sizeof(*online_types), | |
2016 | GFP_KERNEL); | |
2017 | if (!online_types) | |
2018 | return -ENOMEM; | |
2019 | /* | |
2020 | * Initialize all states to MMOP_OFFLINE, so when we abort processing in | |
2021 | * try_offline_memory_block(), we'll skip all unprocessed blocks in | |
2022 | * try_reonline_memory_block(). | |
2023 | */ | |
2024 | memset(online_types, MMOP_OFFLINE, mb_count); | |
08b3acd7 DH |
2025 | |
2026 | lock_device_hotplug(); | |
8dc4bb58 DH |
2027 | |
2028 | tmp = online_types; | |
2029 | rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block); | |
08b3acd7 DH |
2030 | |
2031 | /* | |
8dc4bb58 | 2032 | * If we succeeded in offlining all memory, remove it.
08b3acd7 DH |
2033 | * This cannot fail as it cannot get onlined in the meantime. |
2034 | */ | |
2035 | if (!rc) { | |
2036 | rc = try_remove_memory(nid, start, size); | |
8dc4bb58 DH |
2037 | if (rc) |
2038 | pr_err("%s: Failed to remove memory: %d", __func__, rc); | |
2039 | } | |
2040 | ||
2041 | /* | |
2042 | * Rollback what we did. While memory onlining might theoretically fail | |
2043 | * (nacked by a notifier), it barely ever happens. | |
2044 | */ | |
2045 | if (rc) { | |
2046 | tmp = online_types; | |
2047 | walk_memory_blocks(start, size, &tmp, | |
2048 | try_reonline_memory_block); | |
08b3acd7 DH |
2049 | } |
2050 | unlock_device_hotplug(); | |
2051 | ||
8dc4bb58 | 2052 | kfree(online_types); |
08b3acd7 DH |
2053 | return rc; |
2054 | } | |
2055 | EXPORT_SYMBOL_GPL(offline_and_remove_memory); | |
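A typical consumer, sketched along the lines of virtio-mem style unplug: once a block has been logically unplugged and is no longer in use, offline and remove it in one call; on failure the block simply stays around and the operation can be retried later. demo_unplug_block() is illustrative.

static int demo_unplug_block(int nid, u64 addr)
{
	/* Both addr and size must be aligned to the memory block size. */
	return offline_and_remove_memory(nid, addr,
					 memory_block_size_bytes());
}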
aba6efc4 | 2056 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |