/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: hotpluggable region
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
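
/*
 * Example (illustrative sketch, not part of this header): early setup code
 * typically registers usable RAM, then carves out ranges the kernel must
 * not touch. The addresses and variable names below are hypothetical.
 *
 *	memblock_add(0x80000000, SZ_512M);
 *	memblock_reserve(initrd_start, initrd_size);
 *	memblock_mark_nomap(fw_base, fw_size);
 */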

void memblock_free_all(void);
void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

void __memblock_free_late(phys_addr_t base, phys_addr_t size);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			     p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end)				\
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_NONE, p_start, p_end, NULL)
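
/*
 * Example (illustrative): walking every registered memory range at boot.
 * %pa prints a phys_addr_t through a pointer; nothing here is specific to
 * this header beyond the iterator itself.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory: %pa..%pa\n", &start, &end);
 */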

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

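/*
 * Example (illustrative): summing the pages each early range contributes,
 * much like zone-sizing code does. The names are local to this sketch.
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		nr_pages += end_pfn - start_pfn;
 */
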
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
				  unsigned long *out_spfn,
				  unsigned long *out_epfn);
/**
 * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
 * memblock areas
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone. Available once memblock and an empty zone are initialized. The main
 * assumption is that the zone start, end, and pgdat have been associated.
 * This way we can use the zone to determine the NUMA node, and whether a
 * given part of the memblock is valid for the zone.
 */
#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
	for (i = 0,							\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
	     i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

/**
 * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
 * free memblock areas from a given point
 * @i: u64 used as loop variable
 * @zone: zone in which all of the memory blocks reside
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in a specific
 * zone, continuing from the current position. Available as soon as memblock
 * is initialized.
 */
#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
	for (; i != U64_MAX;						\
	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))

int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)
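
/*
 * Example (illustrative): sizing the largest chunk that could still be
 * allocated, by scanning free (memory && !reserved) areas. All names are
 * local to this sketch.
 *
 *	phys_addr_t start, end, largest = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		largest = max(largest, end - start);
 */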

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NEED_MULTIPLE_NODES
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE		(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
#define MEMBLOCK_ALLOC_KASAN		1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
					      phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}
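
/*
 * Example (illustrative): when only a physical address is wanted (the
 * caller maps it or hands it to hardware itself). A return of 0 means the
 * allocation failed; the size and alignment here are hypothetical.
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_64K, SZ_4K);
 *
 *	if (!pa)
 *		panic("cannot allocate early table\n");
 */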

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}
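
/*
 * Example (illustrative): the common boot-time allocation pattern.
 * memblock_alloc() returns zeroed, directly mapped memory or NULL, and
 * early callers usually panic on failure since boot cannot continue.
 * "struct boot_table" is a hypothetical name.
 *
 *	struct boot_table *tbl;
 *
 *	tbl = memblock_alloc(sizeof(*tbl), SMP_CACHE_BYTES);
 *	if (!tbl)
 *		panic("%s: failed to allocate boot_table\n", __func__);
 */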

static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static inline void *memblock_alloc_from(phys_addr_t size,
					phys_addr_t align,
					phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

static inline void memblock_free_early(phys_addr_t base,
				       phys_addr_t size)
{
	memblock_free(base, size);
}

static inline void memblock_free_early_nid(phys_addr_t base,
					   phys_addr_t size, int nid)
{
	memblock_free(base, size);
}

static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	__memblock_free_late(base, size);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock will allocate memory in the
 * bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

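/*
 * Example (illustrative): memory-hotplug aware setup code may switch to
 * bottom-up so that early allocations land in low, non-hotpluggable
 * memory first; the condition shown is a sketch of that idea, not a
 * fixed recipe.
 *
 *	if (movable_node_is_enabled())
 *		memblock_set_bottom_up(true);
 */
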
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a very clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

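/*
 * Example (illustrative): the number of whole pages a memory region spans,
 * given the rounding conventions above (base rounded up, end rounded down).
 *
 *	unsigned long pages = memblock_region_memory_end_pfn(reg) -
 *			      memblock_region_memory_base_pfn(reg);
 */
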
/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

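/*
 * Example (illustrative): region-level iteration exposes the flags and
 * node id, unlike the address-only for_each_mem_range() above.
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		if (memblock_is_nomap(r))
 *			pr_info("nomap: %pa + %pa\n", &r->base, &r->size);
 */
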
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
					 * shift passed via *_hash_shift */
#define HASH_ZERO	0x00000004	/* Zero allocated hash table */

/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern int hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (0)
#endif

#ifdef CONFIG_MEMTEST
extern void early_memtest(phys_addr_t start, phys_addr_t end);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
{
}
#endif

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */