/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

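/*
 * Region and segment tables are 16KB (2048 entries of 8 bytes each),
 * hence the order-2 page allocation and the PAGE_SIZE * 4 clear below.
 */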
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

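/*
 * A page table needs only 2KB (256 entries of 8 bytes), so once the
 * slab allocator is up, page_table_alloc() can serve it from a
 * fraction of a page; before that, bootmem is used.
 */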
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
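/*
 * Large frames are used where the hardware supports them: EDAT1
 * provides 1MB segment frames, EDAT2 provides 2GB region frames. Under
 * CONFIG_DEBUG_PAGEALLOC everything is mapped with 4KB pages so that
 * individual pages can be invalidated.
 */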
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
			pud_val(*pu_dir) = __pa(address) |
				_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
				(ro ? _REGION_ENTRY_PROTECT : 0);
			address += PUD_SIZE;
			continue;
		}
#endif
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pmd_val(*pm_dir) = __pa(address) |
				_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
				_SEGMENT_ENTRY_YOUNG |
				(ro ? _SEGMENT_ENTRY_PROTECT : 0);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = __pa(address) |
			pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
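/*
 * Large mappings are cleared at their own level (pud/pmd); neither the
 * mapped pages nor the page tables backing the range are freed.
 */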
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
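/*
 * Populates the page tables behind the virtual struct page array
 * (vmemmap). Backing memory is allocated page by page, or in 1MB
 * segment frames when EDAT1 is available.
 */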
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
#ifdef CONFIG_64BIT
			/*
			 * Use 1MB frames for vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used; otherwise we would also need
			 * page tables, since vmemmap_populate() is called
			 * for each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) |
					_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
#endif
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			/* new_page already holds a physical address */
			pte_val(*pt_dir) = new_page | pgprot_val(PAGE_KERNEL);
		}
		address += PAGE_SIZE;
	}
	memset((void *)start, 0, end - start);
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

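/*
 * Tearing down the vmemmap is not implemented; the mapping is simply
 * left in place when memory is removed.
 */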
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	/* Reject segments beyond VMEM_MAX_PHYS or wrapping around zero. */
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
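
/*
 * A sketch of a hypothetical caller (the real users live in the s390
 * memory hotplug code), shown only to illustrate the expected pairing
 * and error handling; attach_storage() is a made-up helper:
 *
 *	if (vmem_add_mapping(start, size))
 *		return -ENOMEM;
 *	if (attach_storage(start, size)) {
 *		vmem_remove_mapping(start, size);
 *		return -EIO;
 *	}
 */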

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
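/*
 * Each memory chunk is split against [ro_start, ro_end), the range from
 * _stext to _eshared that must be mapped read-only; a chunk covering
 * the whole read-only range is added as rw, ro, rw in three calls.
 */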
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
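/*
 * Runs at core_initcall time, once the slab allocator is available, so
 * the boot-time memory chunks can be tracked as regular list entries.
 */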
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);