/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
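
/*
 * Worked example (illustrative): with a 64-bit phys_addr_t,
 * base = 0xfffffffffffff000 and *size = 0x2000 would wrap past the end of
 * the address space, so memblock_cap_size() clamps *size to
 * ULLONG_MAX - base = 0xfff and base + *size no longer overflows.
 */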

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
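
/*
 * Note the half-open semantics (illustrative): [0x1000-0x2000) and
 * [0x1fff-0x3000) overlap, while [0x1000-0x2000) and [0x2000-0x3000)
 * merely touch and are not considered overlapping.
 */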

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), finds free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Utility called from memblock_find_in_range_node(), finds free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	int ret;
	phys_addr_t kernel_end;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid);
}
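
/*
 * Usage sketch (not taken from this file): an early-boot caller that
 * prefers allocations near the kernel image can enable bottom-up mode
 * first, using memblock_set_bottom_up() from <linux/memblock.h>:
 *
 *	memblock_set_bottom_up(true);
 *	addr = memblock_find_in_range_node(size, align, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *
 * and still gets the top-down fallback automatically if nothing fits
 * above the kernel image.
 */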

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					   NUMA_NO_NODE);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
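
/*
 * For illustration: with "memblock=debug" on the command line, doubling the
 * reserved array from its static INIT_MEMBLOCK_REGIONS entries logs a line
 * like (address hypothetical, size depends on sizeof(struct memblock_region)):
 *
 *	memblock: reserved is doubled to 256 at [0x0037a000-0x0037bfff]
 */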

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
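
/*
 * Worked example (illustrative): with same-node, same-flags regions
 *
 *	[0x1000-0x2000) [0x2000-0x5000) [0x6000-0x7000)
 *
 * the first two are physically contiguous and collapse into [0x1000-0x5000),
 * while [0x6000-0x7000) survives because of the hole in front of it.
 */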

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
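
/*
 * Worked example (illustrative): adding [0x1000-0x6000) on top of existing
 * regions [0x2000-0x3000) and [0x4000-0x5000) makes the counting pass see
 * three new pieces - [0x1000-0x2000), [0x3000-0x4000) and [0x5000-0x6000) -
 * so the array can be grown once up front; the insertion pass adds them and
 * memblock_merge_regions() then collapses the lot into [0x1000-0x6000).
 */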

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_range(&memblock.memory, base, size,
				  MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
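
/*
 * Worked example (illustrative): isolating [0x3000-0x5000) out of a single
 * region [0x1000-0x8000) leaves three regions - [0x1000-0x3000),
 * [0x3000-0x5000) and [0x5000-0x8000) - with *start_rgn/*end_rgn bracketing
 * just the middle one, so callers can operate on exactly the given range.
 */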

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and marks it with
 * flag MEMBLOCK_HOTPLUG.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_flags(&type->regions[i], MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * This function isolates region [@base, @base + @size), and clears flag
 * MEMBLOCK_HOTPLUG for the isolated regions.
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_clear_region_flags(&type->regions[i],
					    MEMBLOCK_HOTPLUG);

	memblock_merge_regions(type);
	return 0;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
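
/*
 * For reference, the iterator macros in <linux/memblock.h> are built on this
 * function; for_each_free_mem_range() expands to roughly (simplified):
 *
 *	for (i = 0, __next_mem_range(&i, nid, &memblock.memory,
 *				     &memblock.reserved, p_start, p_end, p_nid);
 *	     i != (u64)ULLONG_MAX;
 *	     __next_mem_range(&i, nid, &memblock.memory,
 *			      &memblock.reserved, p_start, p_end, p_nid))
 *
 * so each step yields the next chunk of memory not covered by a reservation.
 */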

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}
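
/*
 * Usage sketch: the memblock_alloc() wrapper below funnels into this
 * function and returns a physical address, e.g.
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *	void *p = pa ? phys_to_virt(pa) : NULL;
 *
 * Callers that want a ready-made virtual pointer (and zeroing) should use
 * the memblock_virt_alloc_*() helpers further down instead.
 */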

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to
 * virtual and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc
 * for the allocated boot memory block, so that it is never reported as a
 * leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
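
/*
 * Usage sketch: the memblock_virt_alloc() wrapper in <linux/bootmem.h>
 * funnels into the function above, so early-boot code can simply do
 * (struct foo is hypothetical):
 *
 *	struct foo *table = memblock_virt_alloc(nr * sizeof(*table), 0);
 *
 * and gets back zeroed, mapped memory - or a panic that records the caller
 * if the request cannot be satisfied.
 */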

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
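
/*
 * Worked example (illustrative): searching for 0x4800 in
 *
 *	0:[0x1000-0x2000) 1:[0x4000-0x5000) 2:[0x8000-0x9000)
 *
 * probes mid = 1 first: 0x4800 >= 0x4000 and 0x4800 < 0x5000, so index 1
 * is returned in a single step; an address that falls into a hole
 * exhausts the interval and yields -1.
 */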

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
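
/*
 * Worked example (illustrative): with align = 2MB, [0x1234000-0x5678000)
 * is trimmed to [0x1400000-0x5600000); a region smaller than the alignment
 * rounds to an empty range (start >= end) and is removed outright.
 */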

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
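
/*
 * Usage note: booting with "memblock=debug" on the kernel command line
 * sets memblock_debug, which enables the memblock_dbg() tracing used
 * throughout this file (memblock_reserve(), memblock_free(), the
 * virt_alloc helpers, and so on).
 */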

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */