Commit | Line | Data |
---|---|---|
95f72d1e YL |
1 | #ifndef _LINUX_MEMBLOCK_H |
2 | #define _LINUX_MEMBLOCK_H | |
3 | #ifdef __KERNEL__ | |
4 | ||
5 | /* | |
6 | * Logical memory blocks. | |
7 | * | |
8 | * Copyright (C) 2001 Peter Bergner, IBM Corp. | |
9 | * | |
10 | * This program is free software; you can redistribute it and/or | |
11 | * modify it under the terms of the GNU General Public License | |
12 | * as published by the Free Software Foundation; either version | |
13 | * 2 of the License, or (at your option) any later version. | |
14 | */ | |
15 | ||
16 | #include <linux/init.h> | |
17 | #include <linux/mm.h> | |
18 | ||
411a25a8 BH |
19 | #include <asm/memblock.h> |
20 | ||
bf23c51f | 21 | #define INIT_MEMBLOCK_REGIONS 128 |
95f72d1e | 22 | |
e3239ff9 | 23 | struct memblock_region { |
2898cc4c BH |
24 | phys_addr_t base; |
25 | phys_addr_t size; | |
95f72d1e YL |
26 | }; |
27 | ||
/* A growable array of regions (one each for "memory" and "reserved"). */
struct memblock_type {
	unsigned long cnt;		/* number of regions currently in use */
	unsigned long max;		/* capacity of the allocated array */
	struct memblock_region *regions;
};
33 | ||
34 | struct memblock { | |
2898cc4c | 35 | phys_addr_t current_limit; |
4734b594 | 36 | phys_addr_t memory_size; /* Updated by memblock_analyze() */ |
e3239ff9 BH |
37 | struct memblock_type memory; |
38 | struct memblock_type reserved; | |
95f72d1e YL |
39 | }; |
40 | ||
41 | extern struct memblock memblock; | |
42 | ||
43 | extern void __init memblock_init(void); | |
44 | extern void __init memblock_analyze(void); | |
2898cc4c BH |
45 | extern long memblock_add(phys_addr_t base, phys_addr_t size); |
46 | extern long memblock_remove(phys_addr_t base, phys_addr_t size); | |
47 | extern long __init memblock_free(phys_addr_t base, phys_addr_t size); | |
48 | extern long __init memblock_reserve(phys_addr_t base, phys_addr_t size); | |
e63075a3 | 49 | |
c196f76f BH |
50 | /* The numa aware allocator is only available if |
51 | * CONFIG_ARCH_POPULATES_NODE_MAP is set | |
52 | */ | |
2898cc4c BH |
53 | extern phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); |
54 | extern phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align); | |
e63075a3 BH |
55 | |
56 | /* Flags for memblock_alloc_base() and __memblock_alloc_base() */ |
2898cc4c | 57 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
e63075a3 BH |
58 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
59 | ||
2898cc4c | 60 | extern phys_addr_t __init memblock_alloc_base(phys_addr_t size, |
9d3c30f5 BH |
61 | phys_addr_t align, |
62 | phys_addr_t max_addr); | |
2898cc4c | 63 | extern phys_addr_t __init __memblock_alloc_base(phys_addr_t size, |
9d3c30f5 BH |
64 | phys_addr_t align, |
65 | phys_addr_t max_addr); | |
2898cc4c BH |
66 | extern phys_addr_t __init memblock_phys_mem_size(void); |
67 | extern phys_addr_t memblock_end_of_DRAM(void); | |
68 | extern void __init memblock_enforce_memory_limit(phys_addr_t memory_limit); | |
69 | extern int memblock_is_memory(phys_addr_t addr); | |
70 | extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | |
71 | extern int __init memblock_is_reserved(phys_addr_t addr); | |
72 | extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | |
95f72d1e YL |
73 | |
74 | extern void memblock_dump_all(void); | |
75 | ||
35a1f0bd | 76 | /* Provided by the architecture */ |
2898cc4c | 77 | extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); |
d2cd563b BH |
78 | extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, |
79 | phys_addr_t addr2, phys_addr_t size2); | |
35a1f0bd | 80 | |
e63075a3 BH |
81 | /** |
82 | * memblock_set_current_limit - Set the current allocation limit to allow | |
83 | * limiting allocations to what is currently | |
84 | * accessible during boot | |
85 | * @limit: New limit value (physical address) | |
86 | */ | |
2898cc4c | 87 | extern void memblock_set_current_limit(phys_addr_t limit); |
e63075a3 | 88 | |
35a1f0bd | 89 | |
5b385f25 BH |
90 | /* |
91 | * pfn conversion functions | |
92 | * | |
93 | * While the memory MEMBLOCKs should always be page aligned, the reserved | |
94 | MEMBLOCKs may not be. These accessors attempt to provide a very clear |
95 | * idea of what they return for such non aligned MEMBLOCKs. | |
96 | */ | |
97 | ||
98 | /** | |
99 | * memblock_region_base_pfn - Return the lowest pfn intersecting with the region | |
100 | * @reg: memblock_region structure | |
101 | */ | |
102 | static inline unsigned long memblock_region_base_pfn(const struct memblock_region *reg) | |
103 | { | |
104 | return reg->base >> PAGE_SHIFT; | |
105 | } | |
106 | ||
107 | /** | |
108 | * memblock_region_last_pfn - Return the highest pfn intersecting with the region | |
109 | * @reg: memblock_region structure | |
110 | */ | |
111 | static inline unsigned long memblock_region_last_pfn(const struct memblock_region *reg) | |
112 | { | |
113 | return (reg->base + reg->size - 1) >> PAGE_SHIFT; | |
114 | } | |
115 | ||
116 | /** | |
117 | * memblock_region_end_pfn - Return the pfn of the first page following the region | |
118 | * but not intersecting it | |
119 | * @reg: memblock_region structure | |
120 | */ | |
/**
 * memblock_region_end_pfn - Return the pfn of the first page following the region
 *                           but not intersecting it
 * @reg: memblock_region structure
 */
static inline unsigned long memblock_region_end_pfn(const struct memblock_region *reg)
{
	unsigned long last = memblock_region_last_pfn(reg);

	return last + 1;
}
125 | ||
126 | /** | |
127 | * memblock_region_pages - Return the number of pages covering a region | |
128 | * @reg: memblock_region structure | |
129 | */ | |
/**
 * memblock_region_pages - Return the number of pages covering a region
 * @reg: memblock_region structure
 *
 * Fix: the original computed end_pfn - end_pfn, which is always 0.
 * The page count is the exclusive end pfn minus the base pfn.
 */
static inline unsigned long memblock_region_pages(const struct memblock_region *reg)
{
	return memblock_region_end_pfn(reg) - memblock_region_base_pfn(reg);
}
134 | ||
/*
 * Iterate @region over every entry of the given type in the global
 * memblock (pass the member name: memory or reserved).
 */
#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
	     region++)
139 | ||
140 | ||
95f72d1e YL |
141 | #endif /* __KERNEL__ */ |
142 | ||
143 | #endif /* _LINUX_MEMBLOCK_H */ |