/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED = 0x8,	/* always detected via a driver */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

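/*
 * Note: "memory" describes all system RAM known to memblock, while
 * "reserved" tracks the ranges that were set aside or already allocated;
 * the two can overlap. A boot-time report might look like this
 * (illustrative sketch only; both accessors are declared later in this
 * header):
 *
 *	phys_addr_t mem = memblock_phys_mem_size();
 *	phys_addr_t rsv = memblock_reserved_size();
 *
 *	pr_info("memblock: memory %pa, reserved %pa\n", &mem, &rsv);
 */
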
#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
		      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);

void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);

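/*
 * Example (illustrative sketch): early architecture setup typically
 * registers RAM banks reported by firmware and then reserves ranges that
 * must never be handed out, such as the kernel image. bank_base and
 * bank_size stand in for firmware-provided values:
 *
 *	memblock_add(bank_base, bank_size);
 *	memblock_reserve(__pa_symbol(_stext), _end - _stext);
 */
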
/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			     p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end) \
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
			     p_start, p_end, NULL)

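/*
 * Example (illustrative sketch): summing up all usable memory:
 *
 *	phys_addr_t start, end, total = 0;
 *	u64 i;
 *
 *	for_each_mem_range(i, &start, &end)
 *		total += end - start;
 */
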
/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\
				 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

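/*
 * Example (illustrative sketch): skipping regions that are excluded from
 * the kernel direct mapping while walking memory; for_each_mem_region()
 * is defined further down in this header, and do_something_with() is a
 * placeholder for the caller's own handling:
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r) {
 *		if (memblock_is_nomap(r))
 *			continue;
 *		do_something_with(r->base, r->size);
 *	}
 */
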
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone have been initialized.
 * The main assumption is that the zone start, end, and pgdat have been
 * associated. This way we can use the zone to determine NUMA node, and if
 * a given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from current position. Available as soon as memblock is
 * initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)

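/*
 * Example (illustrative sketch): measuring how much memory is still free
 * for boot-time allocations:
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */
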
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
						       phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

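/*
 * Example (illustrative sketch): grabbing a page of physical memory during
 * boot; a return value of zero means the allocation failed:
 *
 *	phys_addr_t pa = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 *	if (!pa)
 *		panic("%s: failed to allocate a page\n", __func__);
 */
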
void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

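/*
 * Example (illustrative sketch): memblock_alloc() returns zeroed memory
 * that is already mapped, so early tables can be used immediately.
 * struct foo and count stand in for the caller's own type and size:
 *
 *	struct foo *table = memblock_alloc(count * sizeof(*table),
 *					   SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */
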
static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
					phys_addr_t align,
					phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

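/*
 * Worked example (illustrative): with 4 KiB pages, a reserved region
 * [0x1800, 0x3800) gives memblock_region_reserved_base_pfn() ==
 * PFN_DOWN(0x1800) == 1 and memblock_region_reserved_end_pfn() ==
 * PFN_UP(0x3800) == 4, i.e. every page the region touches. The memory
 * variants round the other way (PFN_UP of the base, PFN_DOWN of the end),
 * reporting only pages fully covered by the region: here base 2 and
 * end 3, i.e. just pfn 2.
 */
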
/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

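/*
 * Example (illustrative sketch): dumping every reserved range:
 *
 *	struct memblock_region *r;
 *
 *	for_each_reserved_mem_region(r) {
 *		phys_addr_t end = r->base + r->size;
 *
 *		pr_info("reserved: [%pa-%pa]\n", &r->base, &end);
 *	}
 */
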
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

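/*
 * Example (illustrative sketch, modeled on the kernel's boot-time hash
 * tables): passing 0 entries lets the helper size the table from the
 * amount of memory, with "scale" limiting it to roughly one bucket per
 * 2^scale bytes. "My cache", my_table, and my_hash_shift are
 * placeholders:
 *
 *	static unsigned int my_hash_shift __ro_after_init;
 *	static struct hlist_head *my_table __ro_after_init;
 *
 *	my_table = alloc_large_system_hash("My cache",
 *					   sizeof(struct hlist_head),
 *					   0, 14, HASH_ZERO,
 *					   &my_hash_shift, NULL, 0, 0);
 */
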
#ifdef CONFIG_MEMTEST
extern phys_addr_t early_memtest_bad_size;	/* Size of faulty ram found by memtest */
extern bool early_memtest_done;			/* Was early memtest done? */
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif


#endif /* _LINUX_MEMBLOCK_H */