bootmem: clean up free_all_bootmem_core
[linux-2.6-block.git] / mm / bootmem.c
/*
 * bootmem - A boot-time physical memory allocator and configurator
 *
 * Copyright (C) 1999 Ingo Molnar
 *               1999 Kanoj Sarcar, SGI
 *               2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

/* Enabled by passing the bare flag "bootmem_debug" on the kernel command line. */
static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__FUNCTION__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
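
/*
 * Worked example (assuming 4KB pages): representing 131072 pfns (512MB)
 * takes bootmap_bytes(131072) = (131072 + 7) / 8 = 16384 bytes, which is
 * already long-aligned, so bootmem_bootmap_pages() returns
 * PAGE_ALIGN(16384) >> PAGE_SHIFT = 4 bitmap pages.
 */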

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_boot_start < ent->node_boot_start)
			break;
	}
	list_add_tail(&bdata->list, iter);
}
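
/*
 * Note: keeping bdata_list sorted by node_boot_start means the
 * list_for_each_entry() loops below (free_bootmem, reserve_bootmem and
 * the allocator fallbacks) visit nodes in ascending physical address
 * order.
 */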

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
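
/*
 * Illustrative setup sequence (a sketch, not from this file; names are
 * made up): a flat-memory architecture's setup_arch() typically does
 * something like
 *
 *	bitmap_size = init_bootmem(start_pfn, max_low_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), usable_ram_bytes);
 *	reserve_bootmem(PFN_PHYS(start_pfn), bitmap_size, BOOTMEM_DEFAULT);
 *
 * i.e. register everything as reserved, open up the free RAM, then
 * re-reserve the bitmap itself before the first allocation.
 */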

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = PFN_DOWN(bdata->node_boot_start);
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in bulk, one bitmap word (BITS_PER_LONG
	 * pages) at a time.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - PFN_DOWN(bdata->node_boot_start);
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			/* A whole word of free pages: release them in one go. */
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			/* Walk the word bit by bit, freeing single pages. */
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	/* Finally, release the pages holding the bitmap itself. */
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
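
/*
 * Illustrative call site (a sketch, not from this file): an architecture's
 * mem_init() typically retires bootmem with
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which no further bootmem allocations may be made.
 */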

static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	if (addr >= bdata->node_boot_start && addr < bdata->last_success)
		bdata->last_success = addr;

	/*
	 * Round the start up to an index into the bitmap; partially
	 * freed pages are considered reserved.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	/* Round the end down for the same reason, and clamp to the node. */
	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
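
/*
 * Worked example (assuming 4KB pages and node_boot_start == 0): freeing
 * the range [0x1100, 0x3100) rounds the start up to pfn 2 (0x2000) and
 * the end down to pfn 3 (0x3000), so only the single fully-contained page
 * at 0x2000 is actually marked free; the partial pages at either end stay
 * reserved.
 */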

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;
	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}

/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node - don't block the other nodes */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/*
	 * Round the start down to an index into the bitmap; partial
	 * pages are reserved in full.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	/* Round the end up, and clamp to the node. */
	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}

	return 0;
}

static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/*
	 * Round the start down to an index into the bitmap; partial
	 * pages are reserved in full.
	 */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map))
			bdebug("hm, page %lx reserved twice.\n",
				PFN_DOWN(bdata->node_boot_start) + i);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return -ENOMEM;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}

#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			   int flags)
{
	bootmem_data_t *bdata;
	int ret;

	/* Check all nodes first, so an exclusive reservation fails cleanly. */
	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
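
/*
 * Illustrative caller (a sketch, not from this file; names are made up):
 * an architecture keeps its ramdisk from being handed out again with
 * something like
 *
 *	reserve_bootmem(ramdisk_phys_start, ramdisk_size, BOOTMEM_DEFAULT);
 *
 * Passing BOOTMEM_EXCLUSIVE instead makes the call fail with -EBUSY if
 * any page in the range is already reserved.
 */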

/*
 * We 'merge' subsequent allocations to save space. We might 'lose'
 * some fraction of a page if allocations cannot be satisfied due to
 * size constraints on boxes where there is physical RAM space
 * fragmentation - in these cases (mostly large memory boxes) this
 * is not a problem.
 *
 * On low memory boxes we get it right in 100% of the cases.
 *
 * The alignment has to be a power of two.
 *
 * NOTE: This function is _not_ reentrant.
 */
static void * __init
alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
		unsigned long align, unsigned long goal, unsigned long limit)
{
	unsigned long areasize, preferred;
	unsigned long i, start = 0, incr, eidx, end_pfn;
	void *ret;
	unsigned long node_boot_start;
	void *node_bootmem_map;

	if (!size) {
		printk("alloc_bootmem_core(): zero-sized request\n");
		BUG();
	}
	BUG_ON(align & (align - 1));

	/* on nodes without memory - bootmem_map is NULL */
	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	/* bdata->node_boot_start is supposed to be (12+6)-bit, i.e. 256KB, aligned on x86_64(?) */
	node_boot_start = bdata->node_boot_start;
	node_bootmem_map = bdata->node_bootmem_map;
	if (align) {
		node_boot_start = ALIGN(bdata->node_boot_start, align);
		if (node_boot_start > bdata->node_boot_start)
			node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
			    PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
	}

	if (limit && node_boot_start >= limit)
		return NULL;

	end_pfn = bdata->node_low_pfn;
	limit = PFN_DOWN(limit);
	if (limit && end_pfn > limit)
		end_pfn = limit;

	eidx = end_pfn - PFN_DOWN(node_boot_start);

	/*
	 * We try to allocate bootmem pages above 'goal'
	 * first, then we try to allocate lower pages.
	 */
	preferred = 0;
	if (goal && PFN_DOWN(goal) < end_pfn) {
		if (goal > node_boot_start)
			preferred = goal - node_boot_start;

		if (bdata->last_success > node_boot_start &&
			bdata->last_success - node_boot_start >= preferred)
			if (!limit || limit > bdata->last_success)
				preferred = bdata->last_success - node_boot_start;
	}

	preferred = PFN_DOWN(ALIGN(preferred, align));
	areasize = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	incr = align >> PAGE_SHIFT ? : 1;

restart_scan:
	for (i = preferred; i < eidx;) {
		unsigned long j;

		i = find_next_zero_bit(node_bootmem_map, eidx, i);
		i = ALIGN(i, incr);
		if (i >= eidx)
			break;
		if (test_bit(i, node_bootmem_map)) {
			i += incr;
			continue;
		}
		for (j = i + 1; j < i + areasize; ++j) {
			if (j >= eidx)
				goto fail_block;
			if (test_bit(j, node_bootmem_map))
				goto fail_block;
		}
		start = i;
		goto found;
	fail_block:
		i = ALIGN(j, incr);
		if (i == j)
			i += incr;
	}

	if (preferred > 0) {
		preferred = 0;
		goto restart_scan;
	}
	return NULL;

found:
	bdata->last_success = PFN_PHYS(start) + node_boot_start;
	BUG_ON(start >= eidx);

	/*
	 * Is the next page of the previous allocation-end the start
	 * of this allocation's buffer? If yes then we can 'merge'
	 * the previous partial page with this allocation.
	 */
	if (align < PAGE_SIZE &&
	    bdata->last_offset && bdata->last_pos + 1 == start) {
		unsigned long offset, remaining_size;
		offset = ALIGN(bdata->last_offset, align);
		BUG_ON(offset > PAGE_SIZE);
		remaining_size = PAGE_SIZE - offset;
		if (size < remaining_size) {
			areasize = 0;
			/* last_pos unchanged */
			bdata->last_offset = offset + size;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
		} else {
			remaining_size = size - remaining_size;
			areasize = (remaining_size + PAGE_SIZE - 1) / PAGE_SIZE;
			ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
					   offset + node_boot_start);
			bdata->last_pos = start + areasize - 1;
			bdata->last_offset = remaining_size;
		}
		bdata->last_offset &= ~PAGE_MASK;
	} else {
		bdata->last_pos = start + areasize - 1;
		bdata->last_offset = size & ~PAGE_MASK;
		ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
	}

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data,
		start + PFN_DOWN(bdata->node_boot_start),
		start + areasize + PFN_DOWN(bdata->node_boot_start));

	/*
	 * Reserve the area now:
	 */
	for (i = start; i < start + areasize; i++)
		if (unlikely(test_and_set_bit(i, node_bootmem_map)))
			BUG();
	memset(ret, 0, size);
	return ret;
}
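
/*
 * Worked example of the merge above (assuming 4KB pages): two back-to-back
 * alloc_bootmem_core() calls for 512 bytes with align < PAGE_SIZE share one
 * page. The first returns offset 0 of a fresh page and leaves last_offset =
 * 512; the second sees last_pos + 1 == start, fits into the remaining 3584
 * bytes, and returns offset 512 of the same page with areasize = 0, so no
 * additional page is reserved.
 */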

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
				      unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
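
/*
 * Usage note (a sketch; see linux/bootmem.h for the real definitions):
 * most callers use the alloc_bootmem*() wrapper macros, which on
 * configurations without CONFIG_HAVE_ARCH_BOOTMEM_NODE expand roughly
 * along the lines of
 *
 *	#define alloc_bootmem(x) \
 *		__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 *
 * i.e. cacheline alignment with a goal above the DMA zone.
 */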

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Return NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				limit);

	if (!ptr)
		return NULL;

	/* Make sure the allocation did not leak out of the section. */
	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %lu.\n",
			section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
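
/*
 * Note: the default limit keeps "low" allocations below the first 4GB;
 * architectures with stricter low-memory constraints can override
 * ARCH_LOW_ADDRESS_LIMIT in their headers (<asm/processor.h> is included
 * above for this purpose).
 */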

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}