/*
 * linux/mm/memory_hotplug.c
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback
 * and restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

/* The same as the cpu_hotplug lock, but for memory hotplug. */
static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks new readers during
	 * an ongoing mem hotplug operation.
	 */
	int refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} mem_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(mem_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "mem_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_mems() and mem_hotplug_begin/end() */
#define memhp_lock_acquire_read() lock_map_acquire_read(&mem_hotplug.dep_map)
#define memhp_lock_acquire()      lock_map_acquire(&mem_hotplug.dep_map)
#define memhp_lock_release()      lock_map_release(&mem_hotplug.dep_map)

void get_online_mems(void)
{
	might_sleep();
	if (mem_hotplug.active_writer == current)
		return;
	memhp_lock_acquire_read();
	mutex_lock(&mem_hotplug.lock);
	mem_hotplug.refcount++;
	mutex_unlock(&mem_hotplug.lock);
}

void put_online_mems(void)
{
	if (mem_hotplug.active_writer == current)
		return;
	mutex_lock(&mem_hotplug.lock);

	if (WARN_ON(!mem_hotplug.refcount))
		mem_hotplug.refcount++; /* try to fix things up */

	if (!--mem_hotplug.refcount && unlikely(mem_hotplug.active_writer))
		wake_up_process(mem_hotplug.active_writer);
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();
}

static void mem_hotplug_begin(void)
{
	mem_hotplug.active_writer = current;

	memhp_lock_acquire();
	for (;;) {
		mutex_lock(&mem_hotplug.lock);
		if (likely(!mem_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&mem_hotplug.lock);
		schedule();
	}
}

static void mem_hotplug_done(void)
{
	mem_hotplug.active_writer = NULL;
	mutex_unlock(&mem_hotplug.lock);
	memhp_lock_release();
}

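/*
 * Usage sketch (illustrative; not a call site in this file): readers pair
 * get_online_mems()/put_online_mems() around walks of the memory layout,
 * while a hotplug operation itself runs between mem_hotplug_begin() and
 * mem_hotplug_done():
 *
 *	get_online_mems();
 *	... inspect zones/sections; the layout cannot change here ...
 *	put_online_mems();
 *
 * mem_hotplug_begin() parks the writer in TASK_UNINTERRUPTIBLE until
 * mem_hotplug.refcount drops to zero; the last reader wakes it through
 * wake_up_process() in put_online_mems().
 */
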
/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

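/*
 * Note on the pair above: get_page_bootmem() stashes the bootmem type in
 * page->lru.next and the section/node info in page->private, then takes a
 * reference. put_page_bootmem() drops it; once only the initial boot-time
 * reference remains, the page is returned to the buddy allocator via
 * free_reserved_page().
 */
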
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}

#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone_is_initialized(zone)) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN. To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static void __meminit grow_zone_span(struct zone *zone, unsigned long start_pfn,
				     unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone_end_pfn(zone);
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Keep this consistent with free_area_init_core():
		 * if spanned_pages == 0, then keep zone_start_pfn == 0 too.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}

/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic()/memblock_virt_alloc_node_nopanic() */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages,
						 MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
	if (end_pfn > zone_end_pfn(z2))
		goto out_fail;
	/* the moved-out part must be at the leftmost of @z2 */
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must include/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;

	/* use start_pfn for z1's start_pfn if z1 is empty */
	if (!zone_is_empty(z1))
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
	resize_zone(z2, end_pfn, zone_end_pfn(z2));

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z2_end_pfn;

	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
	if (zone_end_pfn(z1) > end_pfn)
		goto out_fail;
	/* must include/overlap */
	if (start_pfn >= zone_end_pfn(z1))
		goto out_fail;

	/* use end_pfn for z2's end_pfn if z2 is empty */
	if (!zone_is_empty(z2))
		z2_end_pfn = zone_end_pfn(z2);
	else
		z2_end_pfn = end_pfn;

	resize_zone(z1, z1->zone_start_pfn, start_pfn);
	resize_zone(z2, start_pfn, z2_end_pfn);

	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}

static void __meminit grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
				      unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
	unsigned long flags;
	int ret;

	zone_type = zone - pgdat->node_zones;
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;

	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
			unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* align the hot-added range to sections while initializing the mem_map */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

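/*
 * Illustrative caller (a sketch, not taken from any particular arch): an
 * arch_add_memory() implementation typically resolves the target zone and
 * then hands whole sections to __add_pages(). The locals here are
 * hypothetical:
 *
 *	struct pglist_data *pgdat = NODE_DATA(nid);
 *	struct zone *zone = pgdat->node_zones +
 *		zone_for_memory(nid, start, size, ZONE_NORMAL);
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *	return __add_pages(nid, zone, start_pfn, nr_pages);
 */
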
#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the zone to it.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we only
		 * need to shrink zone->spanned_pages.
		 * In this case, find the second biggest valid mem_section and
		 * shrink the zone to it.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, it only creates a hole in the zone, so we need not
	 * change the zone span. But the zone may now consist of nothing but
	 * holes, so check for that.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* skip the section being offlined itself */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we need
		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, find the second smallest valid mem_section
		 * and shrink the pgdat to it.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we only
		 * need to shrink pgdat->node_spanned_pages.
		 * In this case, find the second biggest valid mem_section and
		 * shrink the pgdat to it.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat, so we need not
	 * change the pgdat span.
	 * But the pgdat may now consist of nothing but holes, so check for
	 * that.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* skip the section being offlined itself */
		if (start_pfn == pfn)
			continue;

		/* If we find a valid section, we have nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages)
{
	unsigned long i;
	int sections_to_remove;
	resource_size_t start, size;
	int ret = 0;

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
				&start, &endres, ret);
	}

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;
		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);

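/*
 * Illustrative caller (a sketch; arch code varies): arch_remove_memory()
 * usually locates the zone from the first pfn of the range and calls:
 *
 *	unsigned long start_pfn = start >> PAGE_SHIFT;
 *	unsigned long nr_pages = size >> PAGE_SHIFT;
 *	struct zone *zone = page_zone(pfn_to_page(start_pfn));
 *
 *	return __remove_pages(zone, start_pfn, nr_pages);
 */
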
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

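/*
 * Sketch of how a driver can interpose on page onlining (the function name
 * below is hypothetical; balloon drivers use this hook to keep hot-added
 * pages for themselves instead of releasing them to the buddy allocator):
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		// decide whether to hand the page to the page allocator
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */
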
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have
 * normal memory.
 */
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
#endif /* CONFIG_MOVABLE_NODE */

/* check which state of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * if the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before the online,
	 * we will need to set the node to node_states[N_NORMAL_MEMORY]
	 * after the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * if the node doesn't have memory before the online, we will need
	 * to set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

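/*
 * Overview of online_pages() below: after an optional zone rebalance
 * (MOVABLE vs. KERNEL), it announces MEM_GOING_ONLINE, walks the range
 * with online_pages_range() to hand pages to the allocator, fixes up the
 * zone/node present-page counts, node states and zonelists, and finally
 * announces MEM_ONLINE (or MEM_CANCEL_ONLINE on failure).
 */
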
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	mem_hotplug_begin();
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));

	ret = -EINVAL;
	if ((zone_idx(zone) > ZONE_NORMAL ||
	    online_type == MMOP_ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone))
		goto out;

	if (online_type == MMOP_ONLINE_KERNEL &&
	    zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages))
			goto out;
	}
	if (online_type == MMOP_ONLINE_MOVABLE &&
	    zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages))
			goto out;
	}

	/* The previous code may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	nid = pfn_to_nid(pfn);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after onlining.
	 */
	mutex_lock(&zonelists_mutex);
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		build_all_zonelists(NULL, zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		mutex_unlock(&zonelists_mutex);
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		goto out;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(zone_to_nid(zone), &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL, NULL);
		else
			zone_pcp_update(zone);
	}

	mutex_unlock(&zonelists_mutex);

	init_per_zone_wmark_min();

	if (onlined_pages)
		kswapd_run(zone_to_nid(zone));

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
out:
	mem_hotplug_done();
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	}

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones, we don't have any present pages. */
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing an uninitialized zonelist, build one here.
	 */
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(pgdat, NULL);
	mutex_unlock(&zonelists_mutex);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which would make
	 * /sys/device/system/node/nodeX/meminfo report wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

/**
 * try_online_node - online a node if offlined
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	}

out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	u64 start_pfn = PFN_DOWN(start);
	u64 nr_pages = size >> PAGE_SHIFT;

	/* Memory range must be aligned with section */
	if ((start_pfn & ~PAGE_SECTION_MASK) ||
	    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
		pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
				(unsigned long long)start,
				(unsigned long long)size);
		return -EINVAL;
	}

	return 0;
}

/*
 * If the movable zone has already been set up, newly added memory should be
 * checked: if its address is higher than the movable zone's start, it should
 * be added as movable. Without this check, the movable zone may overlap
 * with another zone.
 */
static int should_add_memory_movable(int nid, u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	pg_data_t *pgdat = NODE_DATA(nid);
	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;

	if (zone_is_empty(movable_zone))
		return 0;

	if (movable_zone->zone_start_pfn <= start_pfn)
		return 1;

	return 0;
}

int zone_for_memory(int nid, u64 start, u64 size, int zone_default)
{
	if (should_add_memory_movable(nid, start, size))
		return ZONE_MOVABLE;

	return zone_default;
}

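/*
 * Example (hypothetical numbers): with ZONE_MOVABLE already starting at
 * 4 GiB on a node, zone_for_memory(nid, 0x180000000, size, ZONE_NORMAL)
 * returns ZONE_MOVABLE for memory added at 6 GiB, which keeps the movable
 * zone from overlapping the lower zones.
 */
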
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	struct resource *res;
	int ret;

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	res = register_memory_resource(start, size);
	ret = -EEXIST;
	if (!res)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}

	mem_hotplug_begin();

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs directory of the new node can't be created,
		 * CPUs on the node can't be hot-added. There is no rollback
		 * now, so catch it with a BUG_ON(), reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	release_memory_resource(res);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

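/*
 * Illustrative call site (a sketch, not from a specific driver): a memory
 * hotplug driver that has discovered a new range would typically do
 *
 *	ret = add_memory(nid, start, size);	// start/size from firmware
 *	if (ret)
 *		pr_err("add_memory failed: %d\n", ret);
 *
 * and then online the resulting memory blocks via sysfs or udev policy.
 */
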
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful: we don't hold any locks, page_order can change */
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return 0;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return 1;
}

/*
 * Confirm that all pages in the range [start, end) belong to the same zone.
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scan the pfn range [start,end) for movable/migratable pages (LRU pages
 * and hugepages). We scan by pfn because it's much easier than scanning a
 * linked list. This function returns the pfn of the first found movable
 * page if one is found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (PageHuge(page)) {
				if (is_hugepage_active(page))
					return pfn;
				else
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		}

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can only deal with pages on
		 * LRU.
		 */
		ret = isolate_lru_page(page);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		} else {
#ifdef CONFIG_DEBUG_VM
			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
			       pfn);
			dump_page(page, "failed to remove from LRU");
#endif
			put_page(page);
			/*
			 * Because we don't take the big zone->lock, we should
			 * check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/*
		 * alloc_migrate_target() should be improved!
		 * migrate_pages() returns the number of failed pages.
		 */
		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

#ifdef CONFIG_MOVABLE_NODE
/*
 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have
 * normal memory.
 */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	return true;
}
#else /* CONFIG_MOVABLE_NODE */
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal(struct zone *zone, unsigned long nr_pages)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt;

	for (zt = 0; zt <= ZONE_NORMAL; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	if (present_pages > nr_pages)
		return true;

	present_pages = 0;
	for (; zt <= ZONE_MOVABLE; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;

	/*
	 * we can't offline the last normal memory until all
	 * higher memory is offlined.
	 */
	return present_pages == 0;
}
#endif /* CONFIG_MOVABLE_NODE */

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_MOVABLE_NODE
	/*
	 * Memory used by the kernel cannot be hot-removed because Linux
	 * cannot migrate the kernel pages. When memory hotplug is
	 * enabled, we should prevent memblock from allocating memory
	 * for the kernel.
	 *
	 * ACPI SRAT records all hotpluggable memory ranges. But before
	 * SRAT is parsed, we don't know about it.
	 *
	 * The kernel image is loaded into memory at a very early time. We
	 * cannot prevent this anyway. So on a NUMA system, we set any
	 * node the kernel resides on as un-hotpluggable.
	 *
	 * Since on modern servers one node could have double-digit
	 * gigabytes of memory, we can assume the memory around the kernel
	 * image is also un-hotpluggable. So before SRAT is parsed, just
	 * allocate memory near the kernel image to try our best to keep
	 * the kernel away from hotpluggable memory.
	 */
	memblock_set_bottom_up(true);
	movable_node_enabled = true;
#else
	pr_warn("movable_node option not supported\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory there, 0...zone_last will
	 * become empty after the offline, thus we can determine that we
	 * need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

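/*
 * Overview of __offline_pages() below: the range is first isolated with
 * start_isolate_page_range(), MEM_GOING_OFFLINE is announced, and the
 * repeat loop migrates movable pages away with do_migrate_range() until
 * scan_movable_pages() finds none. Once check_pages_isolated() confirms
 * the range is clean, the pages are pulled from the allocator, counters
 * and node states are updated, and MEM_OFFLINE is sent; every failure
 * path undoes the isolation and sends MEM_CANCEL_OFFLINE instead.
 */
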
static int __ref __offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	unsigned long flags;
	struct zone *zone;
	struct memory_notify arg;

	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier...and readable.
	 * We assume this for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	mem_hotplug_begin();

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	ret = -EINVAL;
	if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages))
		goto out;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		goto out;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) { /* We have movable pages */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/*
	 * dissolve free hugepages in the memory block before actually doing
	 * the offlining, in order to keep hugetlbfs's object counting
	 * consistent.
	 */
	dissolve_free_huge_pages(start_pfn, end_pfn);
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target is isolated.
	 * We cannot do a rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL, NULL);
		mutex_unlock(&zonelists_mutex);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0)
		kswapd_stop(node);

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	mem_hotplug_done();
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
	       (unsigned long long) start_pfn << PAGE_SHIFT,
	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push the pages back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);

out:
	mem_hotplug_done();
	return ret;
}

int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ);
}

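/*
 * Usage note: the 120 * HZ timeout above bounds how long the migration
 * retry loop may run. A sketch of a caller (the memory block sysfs code
 * is the in-tree user; sections_per_block is its per-block section count):
 *
 *	ret = offline_pages(section_nr_to_pfn(mem->start_section_nr),
 *			    PAGES_PER_SECTION * sections_per_block);
 */
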
#endif /* CONFIG_MEMORY_HOTREMOVE */

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}

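/*
 * Illustrative use (a sketch; check_memblock_offlined_cb() below is the
 * in-file example): the callback sees each memory_block once and aborts
 * the walk by returning non-zero:
 *
 *	static int count_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		(*(int *)arg)++;	// hypothetical block counter
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
 *			  &n, count_blocks_cb);
 */
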
#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * the cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
	int ret;

	ret = check_cpu_on_node(pgdat);
	if (ret)
		return ret;

	/*
	 * the node will be offlined when we come here, so we can clear
	 * the cpu_to_node() now.
	 */

	unmap_cpu_on_node(pgdat);
	return 0;
}

/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;
	int i;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * some memory sections of this node are not removed, and we
		 * can't offline this node now.
		 */
		return;
	}

	if (check_and_unmap_cpu_on_node(pgdat))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

	/* free the wait table in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		/*
		 * wait_table may be allocated from boot memory,
		 * so only free it here if it was allocated by vmalloc.
		 */
		if (is_vmalloc_addr(zone->wait_table))
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee the address of pgdat/zone is not
	 * on the stack of any kernel threads or used by other kernel objects
	 * without reference counting or another synchronizing method, do not
	 * reset node_data and free pgdat here. Just reset it to 0 and reuse
	 * the memory when the node is online again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
}
EXPORT_SYMBOL(try_offline_node);

/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a BUG()
	 * if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

	arch_remove_memory(start, size);

	try_offline_node(nid);

	mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);

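/*
 * Illustrative call site (a sketch following the NOTE above): teardown
 * mirrors add_memory() and must run under the device hotplug lock:
 *
 *	lock_device_hotplug();
 *	remove_memory(nid, start, size);	// all blocks already offline
 *	unlock_device_hotplug();
 */
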
#endif /* CONFIG_MEMORY_HOTREMOVE */