/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *               1999 Kanoj Sarcar, SGI
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static LIST_HEAD(bdata_list);

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;
static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);
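/*
 * Note: through the early_param() above, booting with "bootmem_debug" on the
 * kernel command line enables the bdebug() diagnostics used throughout this
 * file.
 */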
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__FUNCTION__, ## args);		\
})
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
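/*
 * Worked example (assuming 4 KiB pages and 64-bit longs): representing
 * 128 MiB of RAM takes 32768 page bits, i.e. (32768 + 7) / 8 = 4096 bytes,
 * already a multiple of sizeof(long), so bootmem_bootmap_pages() returns
 * exactly one page for the bitmap.
 */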
/*
 * Link a node's bootmem_data into bdata_list, keeping the list sorted by
 * node start address.
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	if (list_empty(&bdata_list)) {
		list_add(&bdata->list, &bdata_list);
		return;
	}
	/* insert in order */
	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_boot_start < ent->node_boot_start) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
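/*
 * Illustrative call sequence from an architecture's setup code; the pfn and
 * size variables below are hypothetical placeholders, not names used
 * elsewhere in the kernel:
 *
 *	bootmap_size = init_bootmem_node(NODE_DATA(0), bitmap_pfn,
 *					 start_pfn, end_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
 *	reserve_bootmem(PFN_PHYS(bitmap_pfn), bootmap_size, BOOTMEM_DEFAULT);
 *
 * i.e. everything starts out reserved, usable RAM is then freed, and the
 * bitmap itself (plus kernel image, initrd, ...) is reserved again.
 */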
/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	unsigned long pfn;
	struct page *page;
	unsigned long i, count;
	unsigned long idx, pages;
	unsigned long *map;
	int gofast = 0;

	BUG_ON(!bdata->node_bootmem_map);

	count = 0;
	/* first extant page of the node */
	pfn = PFN_DOWN(bdata->node_boot_start);
	idx = bdata->node_low_pfn - pfn;
	map = bdata->node_bootmem_map;
	/*
	 * Check if we are aligned to BITS_PER_LONG pages. If so, we might
	 * be able to free page orders of that size at once.
	 */
	if (!(pfn & (BITS_PER_LONG-1)))
		gofast = 1;

	for (i = 0; i < idx; ) {
		unsigned long v = ~map[i / BITS_PER_LONG];

		if (gofast && v == ~0UL) {
			/* the whole word is free: release it as one high-order block */
			int order;

			page = pfn_to_page(pfn);
			count += BITS_PER_LONG;
			order = ffs(BITS_PER_LONG) - 1;
			__free_pages_bootmem(page, order);
			i += BITS_PER_LONG;
			page += BITS_PER_LONG;
		} else if (v) {
			/* partially free word: release the free pages one by one */
			unsigned long m;

			page = pfn_to_page(pfn);
			for (m = 1; m && i < idx; m <<= 1, page++, i++) {
				if (v & m) {
					count++;
					__free_pages_bootmem(page, 0);
				}
			}
		} else {
			i += BITS_PER_LONG;
		}
		pfn += BITS_PER_LONG;
	}

	/*
	 * Now free the allocator bitmap itself, it's not
	 * needed anymore:
	 */
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
	idx = bootmem_bootmap_pages(pages);
	for (i = 0; i < idx; i++, page++)
		__free_pages_bootmem(page, 0);
	count += i;
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
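/*
 * Usage note (describing the usual flow, not a rule enforced here): once the
 * buddy allocator's data structures are set up, the architecture's mem_init()
 * typically calls free_all_bootmem() (or free_all_bootmem_node() per node)
 * exactly once; after that, bootmem must not be used again and normal page
 * and slab allocations take over.
 */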
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
			PFN_DOWN(addr) > bdata->node_low_pfn)
		return;
	/*
	 * round down end of usable mem, partially free pages are
	 * considered reserved.
	 */

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Convert the address range to bitmap indices within this node.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}
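/*
 * Rounding example (assuming 4 KiB pages and a node starting at physical
 * address 0): free_bootmem(0x1800, 0x4000) covers physical bytes
 * 0x1800-0x57ff, but only pfns 2-4 become usable, because the start index is
 * rounded up with PFN_UP() and the end index down with PFN_DOWN(); the
 * partially covered pages at either end stay reserved, matching the
 * "partial pages" note above.
 */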
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node, don't block the others */
	if (addr + size < bdata->node_boot_start ||
			PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Convert the address range to bitmap indices within this node.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}
static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
			PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/*
	 * Convert the address range to bitmap indices within this node.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map))
			bdebug("hm, page %lx reserved twice.\n",
				PFN_DOWN(bdata->node_boot_start) + i);
}
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return -ENOMEM;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			   int flags)
{
	bootmem_data_t *bdata;
	int ret;

	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
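/*
 * Hedged usage sketch: boot code that must keep a firmware or initrd region
 * out of the allocator would call, for example,
 *
 *	reserve_bootmem(region_start, region_size, BOOTMEM_DEFAULT);
 *
 * where region_start/region_size are hypothetical physical address and
 * length values. Passing BOOTMEM_EXCLUSIVE instead makes the call fail with
 * -EBUSY (via can_reserve_bootmem_core() above) if any page in the range is
 * already reserved, rather than silently sharing it.
 */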
/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * alignment has to be a power of 2 value.
 *
 * NOTE:  This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk("alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align-1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	/* bdata->node_boot_start is supposed to be (12+6) bits aligned on x86_64 ? */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || (limit && limit > bdata->last_success))
				preferred = bdata->last_success - node_boot_start;
	}

	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			i += incr;
			continue;
		}
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	/* nothing above the preferred offset: retry from the bottom once */
	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);
	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos+1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data,
		start + PFN_DOWN(bdata->node_boot_start),
		start + areasize + PFN_DOWN(bdata->node_boot_start));

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);

	return ret;
}
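/*
 * Worked example of the merge above (4 KiB pages, align < PAGE_SIZE): a
 * first allocation of 100 bytes leaves last_offset = 100 on some page; if
 * the next scan lands on the bitmap slot right after last_pos, a second
 * 100-byte request is carved out of that same page at offset 100 (rounded
 * up to the requested alignment) instead of consuming a fresh page, so
 * small boot-time allocations do not each burn a whole page.
 */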
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
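/*
 * Hedged usage note: the convenience macros in <linux/bootmem.h> (e.g.
 * alloc_bootmem_node()) are thin wrappers around this function; a typical
 * expansion looks roughly like
 *
 *	__alloc_bootmem_node(pgdat, size, SMP_CACHE_BYTES,
 *			     __pa(MAX_DMA_ADDRESS));
 *
 * i.e. cache-line alignment with a goal above the DMA zone. Treat the exact
 * arguments as illustrative rather than authoritative.
 */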
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				 limit);

	if (!ptr)
		return NULL;

	/* the allocation must not straddle a section boundary */
	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
		       section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
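/*
 * Unless the architecture overrides ARCH_LOW_ADDRESS_LIMIT, "low" boot
 * memory means physical addresses up to 0xffffffff, i.e. below 4 GiB, which
 * typically matters for devices limited to 32-bit DMA.
 */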
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					 ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * The allocation is not retried on other nodes; the function returns NULL
 * if the request can not be satisfied on @pgdat.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}
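/*
 * Hedged usage sketch: an early consumer that needs buffers reachable by
 * 32-bit DMA on a particular node might do
 *
 *	buf = __alloc_bootmem_low_node(pgdat, buf_size, PAGE_SIZE, 0);
 *	if (!buf)
 *		buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 *
 * where buf and buf_size are hypothetical; the explicit fallback is needed
 * because, as noted above, the node-local variant does not retry elsewhere.
 */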