/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

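/*
 * Allocate pages from the buddy allocator once the slab allocator is
 * up and running, and from bootmem during early boot (hence __ref).
 */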
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

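/*
 * Allocate a pud table (four pages, 2048 entries) and mark all entries
 * empty; on 31 bit the upper page table levels are folded into the
 * pgd, so nothing needs to be allocated.
 */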
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

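/*
 * Same for the pmd (segment) table: four pages, initialized to empty
 * segment table entries.
 */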
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

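/*
 * Allocate a page table: through init_mm's page table allocator once
 * slab is available, from bootmem during early boot. All entries start
 * out invalid.
 */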
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping; map it read-only
 * if @ro is set.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

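		/*
		 * Use a 1 MB segment (huge page) mapping when the
		 * machine supports it and the remaining range is large
		 * enough and properly aligned.
		 */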
#ifdef CONFIG_64BIT
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

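		/*
		 * A huge pmd maps a full 1 MB segment: clear it and
		 * skip ahead to the next segment.
		 */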
		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add backing pages for a range of the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

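		/*
		 * Allocate and map a backing page if this part of the
		 * vmemmap is not populated yet.
		 */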
		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

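/*
 * Tear down a mapping previously established with vmem_add_mapping().
 * Returns -ENOENT if no matching segment exists.
 */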
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

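/*
 * Establish a 1:1 mapping for a new memory segment. Returns -ERANGE
 * if the segment lies beyond VMEM_MAX_PHYS, -ENOSPC if it overlaps an
 * existing segment, and -ENOMEM if an allocation fails.
 */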
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
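	/*
	 * Kernel text and shared read-only data between _stext and
	 * _eshared are mapped read-only; chunks straddling a boundary
	 * are split into separate read-only and read-write mappings.
	 */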
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

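/* Runs as a core initcall, after the slab allocator is available. */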
core_initcall(vmem_convert_memory_chunk);
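
/*
 * Usage sketch (not part of this file; names are illustrative): a
 * caller that needs a physical range present in the 1:1 mapping would
 * pair the two exported interfaces like this:
 *
 *	if (vmem_add_mapping(seg_addr, seg_size))
 *		goto out_error;
 *	...access the range through the identity mapping...
 *	vmem_remove_mapping(seg_addr, seg_size);
 */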