/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a new callback
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;

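/*
 * Example (a sketch, not code from this file): a driver that wants to
 * claim newly onlined pages itself, instead of handing them to the buddy
 * allocator, can install its own callback. my_online_page() is
 * hypothetical; set_online_page_callback() and the __online_page_*()
 * helpers below are the real interface.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		// keep the page on a driver-private list instead of
 *		// calling __online_page_free()
 *	}
 *
 *	...
 *	set_online_page_callback(&my_online_page);
 */
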
DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);

	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
	lock_system_sleep();
}

void unlock_memory_hotplug(void)
{
	unlock_system_sleep();
	mutex_unlock(&mem_hotplug_mutex);
}

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
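/*
 * get_page_bootmem()/put_page_bootmem() keep a usage count on pages that
 * were handed out by the bootmem allocator to back memmaps, usemaps and
 * node data. The type is stashed in page->lru.next and the extra info
 * (section number or node id) in page_private(), so hot-remove can tell
 * how such a page is used and free it back once the last user drops it.
 */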
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

/*
 * The reference to __meminit __free_pages_bootmem is valid,
 * so use __ref to tell modpost not to generate a warning.
 */
void __ref put_page_bootmem(struct page *page)
{
	unsigned long type;
	static DEFINE_MUTEX(ppb_lock);

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);

		/*
		 * Please refer to the comment for __free_pages_bootmem()
		 * for why we serialize here.
		 */
		mutex_lock(&ppb_lock);
		__free_pages_bootmem(page, 0);
		mutex_unlock(&ppb_lock);
		totalram_pages++;
	}
}

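/*
 * The two register_page_bootmem_info_section() variants below record, for
 * one memory section, which pages back its memmap and its usemap
 * (pageblock flags), tagging them with SECTION_INFO/MIX_SECTION_INFO so
 * they can be accounted for at hot-remove time. The SPARSEMEM_VMEMMAP
 * variant walks the vmemmap via register_page_bootmem_memmap() instead of
 * using virt_to_page() on the mapping directly.
 */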
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone->wait_table) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pfn + pgdat->node_spanned_pages;

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN. To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other node.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Make it consistent with free_area_init_core():
		 * if spanned_pages == 0, keep zone_start_pfn == 0.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

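/*
 * move_pfn_range_left()/move_pfn_range_right() move [start_pfn, end_pfn)
 * from one zone into an adjacent one, resizing both spans under the pgdat
 * resize lock and rewriting the zone/node links of the affected pages.
 * online_pages() uses them to retarget a range at ZONE_MOVABLE, or back at
 * a kernel zone, before the pages are actually onlined.
 */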
static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	if (!z1->wait_table) {
		ret = init_currently_empty_zone(z1, start_pfn,
			end_pfn - start_pfn, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
		goto out_fail;
	/* the moved-out part must be at the leftmost of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* the range must be included in or overlap @z2 */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (z1->spanned_pages)
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	if (!z2->wait_table) {
		ret = init_currently_empty_zone(z2, start_pfn,
			end_pfn - start_pfn, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
	if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
		goto out_fail;
	/* the range must be included in or overlap @z1 */
	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (z2->spanned_pages)
		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret;

		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret)
			return ret;
	}
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn) */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the zone to start there.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the zone to end there.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, removing it only creates a hole in the zone. In this
	 * case the zone need not change, but the zone may now consist of
	 * nothing but holes, so check whether any valid section remains.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we need
		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the pgdat to start there.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section
		 * and shrink the pgdat to end there.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, removing it only creates a hole in the pgdat. In this
	 * case the pgdat need not change, but the pgdat may now consist of
	 * nothing but holes, so check whether any valid section remains.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* skip the section being removed */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

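/*
 * __remove_section() tears down a single memory section: it unregisters
 * the section's sysfs memory block, shrinks the spans of the owning zone
 * and pgdat, and frees the section's memmap via
 * sparse_remove_one_section().
 */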
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms);
	return 0;
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
	unsigned long i, ret = 0;
	int sections_to_remove;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

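/*
 * A caller such as a balloon driver can divert freshly onlined pages to
 * itself by swapping in its own callback here; restore_online_page_callback()
 * puts the generic one back. Only one non-generic callback can be
 * registered at a time.
 */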
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	if (pfn >= num_physpages)
		num_physpages = pfn + 1;
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	totalram_pages++;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages++;
#endif
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which state of node_states will be changed when online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

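/*
 * online_pages() brings [pfn, pfn + nr_pages) online. Depending on
 * @online_type the range may first be moved into ZONE_MOVABLE or back
 * into the neighbouring kernel zone; then the memory notifiers are
 * called, each page is handed to the online_page_callback, and the zone
 * and node accounting, zonelists, watermarks and kswapd are brought up
 * to date.
 */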
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	lock_memory_hotplug();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone)) {
		unlock_memory_hotplug();
		return -1;
	}

	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}
	if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
			return -1;
		}
	}

	/* The code above may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = page_to_nid(pfn_to_page(pfn));

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		unlock_memory_hotplug();
		return ret;
	}

	zone->managed_pages += onlined_pages;
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;
	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	unlock_memory_hotplug();

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones, we don't have any present pages */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/*
 * Called by cpu_up() to online a node that has no onlined memory.
 */
int mem_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	lock_memory_hotplug();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

out:
	unlock_memory_hotplug();
	return ret;
}

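/*
 * add_memory() is the top-level entry point for hot-adding @size bytes of
 * memory at physical address @start to node @nid: it claims the iomem
 * resource, allocates a pgdat if the node is new, calls the arch hook to
 * create the sections, registers the node in sysfs and records the range
 * in the firmware map.
 */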
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	lock_memory_hotplug();

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		goto out;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}
	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs file of the new node can't be created, CPUs
		 * on the node can't be hot-added. There is no way to roll
		 * back now, so check with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	unlock_memory_hotplug();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to the
 * same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU list.
 * Scan pfns from start to end and return the pfn of the first LRU page
 * found, or 0 if none is found.
 */
static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

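/*
 * do_migrate_range() isolates up to NR_OFFLINE_AT_ONCE_PAGES LRU pages in
 * [start_pfn, end_pfn) and migrates them off the range being offlined.
 * Free pages are simply skipped; a page that is still referenced but not
 * on the LRU aborts the walk with -EBUSY so the caller can drain and
 * retry.
 */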
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages, and we can only deal with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page);
#endif
			put_page(page);
			/*
			 * Because we don't hold zone->lock here, we should
			 * check the page count again.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_lru_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target should be improved!
		 * migrate_pages returns the number of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, 0,
							true, MIGRATE_SYNC,
							MR_MEMORY_HOTPLUG);
		if (ret)
			putback_lru_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory, 0...zone_last will
	 * become empty after the offline, thus we can determine that we
	 * will need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

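/*
 * __offline_pages() takes [start_pfn, end_pfn) offline: the range is
 * isolated, LRU pages are migrated away, and once every page is free the
 * pages are pulled out of the buddy allocator and the zone/node
 * accounting, zonelists, watermarks and node states are fixed up. The
 * migration loop keeps retrying until @timeout jiffies have elapsed.
 */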
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier, and readable.
	 * We assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	lock_memory_hotplug();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have a page on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs, this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages, this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * OK, all of our target pages are isolated.
	 * We cannot do rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pageblock flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	zone->managed_pages -= offlined_pages;
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0)
		kswapd_stop(node);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	unlock_memory_hotplug();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	unlock_memory_hotplug();
	return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}

/**
 * offline_memory_block_cb - callback function for offlining memory block
 * @mem: the memory block to be offlined
 * @arg: buffer to hold the error code
 *
 * Always returns 0, and stores the error code in @arg if any.
 */
static int offline_memory_block_cb(struct memory_block *mem, void *arg)
{
	int *ret = arg;
	int error = offline_memory_block(mem);

	if (error != 0 && *ret == 0)
		*ret = error;

	return 0;
}

static int is_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret))
		pr_warn("removing memory fails, because memory "
			"[%#010llx-%#010llx] is onlined\n",
			PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)),
			PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1);

	return ret;
}

static int check_cpu_on_node(void *data)
{
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * A cpu on this node hasn't been removed yet,
			 * so we can't offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(void *data)
{
#ifdef CONFIG_ACPI_NUMA
	struct pglist_data *pgdat = data;
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(void *data)
{
	int ret = check_cpu_on_node(data);

	if (ret)
		return ret;

	/*
	 * The node will be offlined when we come here, so we can clear
	 * the cpu_to_node() mapping now.
	 */
	unmap_cpu_on_node(data);
	return 0;
}

/* offline the node if all memory sections of this node are removed */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	struct page *pgdat_page = virt_to_page(pgdat);
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * some memory sections of this node are not removed, and we
		 * can't offline the node now.
		 */
		return;
	}

	if (stop_machine(check_and_unmap_cpu_on_node, pgdat, NULL))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
		/* node data is allocated from boot memory */
		return;

	/* free the wait table in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (zone->wait_table)
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee the address of pgdat/zone is not
	 * on the stack of any kernel threads or used by other kernel objects
	 * without reference counting or another synchronizing method, do not
	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
	 * the memory when the node is online again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);

int __ref remove_memory(int nid, u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret = 0;
	int retry = 1;

	start_pfn = PFN_DOWN(start);
	end_pfn = start_pfn + PFN_DOWN(size);

	/*
	 * When CONFIG_MEMCG is on, one memory block may be used by other
	 * blocks to store page cgroup when onlining pages. But we don't know
	 * in what order pages are onlined. So we iterate twice to offline
	 * memory:
	 * 1st iteration: offline every non-primary memory block.
	 * 2nd iteration: offline the primary (i.e. first added) memory block.
	 */
repeat:
	walk_memory_range(start_pfn, end_pfn, &ret,
			  offline_memory_block_cb);
	if (ret) {
		if (!retry)
			return ret;

		retry = 0;
		ret = 0;
		goto repeat;
	}

	lock_memory_hotplug();

	/*
	 * we have offlined all memory blocks like this:
	 *   1. lock memory hotplug
	 *   2. offline a memory block
	 *   3. unlock memory hotplug
	 *
	 * repeat steps 1-3 to offline each memory block. All memory blocks
	 * must be offlined before removing memory. But we don't hold the
	 * lock across the whole operation, so we should check whether all
	 * memory blocks are offlined.
	 */

	ret = walk_memory_range(start_pfn, end_pfn, NULL,
				is_memblock_offlined_cb);
	if (ret) {
		unlock_memory_hotplug();
		return ret;
	}

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	arch_remove_memory(start, size);

	try_offline_node(nid);

	unlock_memory_hotplug();

	return 0;
}
#else
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return -EINVAL;
}
int remove_memory(int nid, u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
EXPORT_SYMBOL_GPL(remove_memory);