Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/s390/mm/init.c | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | |
7 | * | |
8 | * Derived from "arch/i386/mm/init.c" | |
9 | * Copyright (C) 1995 Linus Torvalds | |
10 | */ | |
11 | ||
12 | #include <linux/config.h> | |
13 | #include <linux/signal.h> | |
14 | #include <linux/sched.h> | |
15 | #include <linux/kernel.h> | |
16 | #include <linux/errno.h> | |
17 | #include <linux/string.h> | |
18 | #include <linux/types.h> | |
19 | #include <linux/ptrace.h> | |
20 | #include <linux/mman.h> | |
21 | #include <linux/mm.h> | |
22 | #include <linux/swap.h> | |
23 | #include <linux/smp.h> | |
24 | #include <linux/init.h> | |
25 | #include <linux/pagemap.h> | |
26 | #include <linux/bootmem.h> | |
27 | ||
28 | #include <asm/processor.h> | |
29 | #include <asm/system.h> | |
30 | #include <asm/uaccess.h> | |
31 | #include <asm/pgtable.h> | |
32 | #include <asm/pgalloc.h> | |
33 | #include <asm/dma.h> | |
34 | #include <asm/lowcore.h> | |
35 | #include <asm/tlb.h> | |
36 | #include <asm/tlbflush.h> | |
37 | ||
/* Per-CPU mmu_gather state consumed by the generic TLB shootdown code
 * pulled in via <asm/tlb.h>. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* The kernel's initial top-level page table, populated by paging_init()
 * below; kept page aligned. */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
/* Page cleared in mem_init(); presumably serves as the shared zero page
 * for anonymous read faults -- confirm against users of empty_zero_page. */
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
42 | ||
/*
 * diag10 - issue DIAGNOSE 0x10 on the page containing @addr.
 *
 * NOTE(review): diagnose 0x10 is presumably the z/VM "release page"
 * hypercall -- confirm against the z/VM CP programming services docs.
 *
 * Addresses at or above 0x7ff00000 are silently skipped: the diagnose is
 * executed in 31-bit addressing mode (see the sam31 below on 64-bit), so
 * only pages addressable there are eligible.  The __ex_table entry maps a
 * faulting diagnose back onto the following instruction, turning a fault
 * (e.g. when not running under VM) into a no-op instead of an oops.
 */
void diag10(unsigned long addr)
{
	if (addr >= 0x7ff00000)
		return;
#ifdef __s390x__
	/* 64-bit kernel: drop to 31-bit mode around the diagnose, then
	 * return to 64-bit mode; the fixup target 0b is the sam64 so
	 * addressing mode is restored even if the diagnose faults. */
	asm volatile (
		" sam31\n"
		" diag %0,%0,0x10\n"
		"0: sam64\n"
		".section __ex_table,\"a\"\n"
		" .align 8\n"
		" .quad 0b, 0b\n"
		".previous\n"
		: : "a" (addr));
#else
	/* 31-bit kernel: already in the right mode; fixup resumes after
	 * the diagnose. */
	asm volatile (
		" diag %0,%0,0x10\n"
		"0:\n"
		".section __ex_table,\"a\"\n"
		" .align 4\n"
		" .long 0b, 0b\n"
		".previous\n"
		: : "a" (addr));
#endif
}
68 | ||
69 | void show_mem(void) | |
70 | { | |
71 | int i, total = 0, reserved = 0; | |
72 | int shared = 0, cached = 0; | |
73 | ||
74 | printk("Mem-info:\n"); | |
75 | show_free_areas(); | |
76 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | |
77 | i = max_mapnr; | |
78 | while (i-- > 0) { | |
79 | total++; | |
80 | if (PageReserved(mem_map+i)) | |
81 | reserved++; | |
82 | else if (PageSwapCache(mem_map+i)) | |
83 | cached++; | |
84 | else if (page_count(mem_map+i)) | |
85 | shared += page_count(mem_map+i) - 1; | |
86 | } | |
87 | printk("%d pages of RAM\n",total); | |
88 | printk("%d reserved pages\n",reserved); | |
89 | printk("%d pages shared\n",shared); | |
90 | printk("%d pages swap cached\n",cached); | |
91 | } | |
92 | ||
/* References to section boundaries */

/*
 * Linker-script symbols delimiting the kernel image sections.  Only
 * their addresses (&_text, &_etext, ...) are meaningful; the declared
 * type is irrelevant.  Used by mem_init() and free_initmem() below.
 */
extern unsigned long _text;
extern unsigned long _etext;
extern unsigned long _edata;
extern unsigned long __bss_start;
extern unsigned long _end;

/* Start/end of the __init sections, freed after boot by free_initmem(). */
extern unsigned long __init_begin;
extern unsigned long __init_end;
103 | ||
104 | /* | |
105 | * paging_init() sets up the page tables | |
106 | */ | |
107 | ||
108 | #ifndef CONFIG_ARCH_S390X | |
/*
 * paging_init - set up the 31-bit kernel page tables.
 *
 * Builds an identity mapping of all physical memory below max_low_pfn
 * into swapper_pg_dir, loads the resulting address-space-control element
 * into control registers 1, 7 and 13, enables DAT, and initializes the
 * zone lists (everything in ZONE_DMA on 31-bit).
 */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pte_t * pg_table;
	pte_t pte;
	int i;
	unsigned long tmp;
	unsigned long pfn = 0;
	/* ASCE value: origin of swapper_pg_dir plus designation bits. */
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
	/* SSM takes a one-byte operand; the leading byte 0x04 presumably
	 * carries the PSW DAT bit -- confirm against the PoP. */
	static const int ssm_mask = 0x04000000L;

	/* unmap whole virtual address space */

	pg_dir = swapper_pg_dir;

	for (i=0;i<KERNEL_PGD_PTRS;i++)
		pmd_clear((pmd_t*)pg_dir++);

	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	while (pfn < max_low_pfn) {
		/*
		 * pg_table is physical at this point
		 */
		pg_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);

		/* One 4K bootmem page holds four 1K page tables; wire its
		 * four quarters into four consecutive segment-table slots
		 * (pgd0..pgd3) of this pgd entry. */
		pg_dir->pgd0 =  (_PAGE_TABLE | __pa(pg_table));
		pg_dir->pgd1 =  (_PAGE_TABLE | (__pa(pg_table)+1024));
		pg_dir->pgd2 =  (_PAGE_TABLE | (__pa(pg_table)+2048));
		pg_dir->pgd3 =  (_PAGE_TABLE | (__pa(pg_table)+3072));
		pg_dir++;

		/* Fill all PTEs of the freshly wired tables; slots past
		 * the end of memory get an empty (cleared) pte. */
		for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
			pte = pfn_pte(pfn, PAGE_KERNEL);
			if (pfn >= max_low_pfn)
				pte_clear(&init_mm, 0, &pte);
			set_pte(pg_table, pte);
			pfn++;
		}
	}

	S390_lowcore.kernel_asce = pgdir_k;

	/* enable virtual mapping in kernel mode: load the ASCE into the
	 * primary (CR1), secondary (CR7) and home (CR13) space control
	 * registers, then set the DAT bit via SSM. */
	__asm__ __volatile__("    LCTL  1,1,%0\n"
			     "    LCTL  7,7,%0\n"
			     "    LCTL  13,13,%0\n"
			     "    SSM   %1"
			     : : "m" (pgdir_k), "m" (ssm_mask));

	local_flush_tlb();

	{
		unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0};

		zones_size[ZONE_DMA] = max_low_pfn;
		free_area_init(zones_size);
	}
	return;
}
173 | ||
174 | #else /* CONFIG_ARCH_S390X */ | |
/*
 * paging_init - set up the 64-bit kernel page tables.
 *
 * Splits memory into ZONE_DMA / ZONE_NORMAL, then builds a three-level
 * identity mapping of all physical memory below max_low_pfn into
 * swapper_pg_dir, loads the resulting region-table ASCE into control
 * registers 1, 7 and 13 and enables DAT.
 */
void __init paging_init(void)
{
	pgd_t * pg_dir;
	pmd_t * pm_dir;
	pte_t * pt_dir;
	pte_t   pte;
	int     i,j,k;
	unsigned long pfn = 0;
	/* ASCE value: origin of swapper_pg_dir plus region-table bits. */
	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
	  _KERN_REGION_TABLE;
	/* SSM operand whose leading byte (0x04) presumably carries the
	 * PSW DAT bit -- confirm against the PoP. */
	static const int ssm_mask = 0x04000000L;

	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
	unsigned long dma_pfn, high_pfn;

	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
	high_pfn = max_low_pfn;

	/* Everything below the DMA limit goes to ZONE_DMA, the rest (if
	 * any) to ZONE_NORMAL. */
	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[]. */
	free_area_init(zones_size);


	/*
	 * map whole physical memory to virtual memory (identity mapping)
	 */

	pg_dir = swapper_pg_dir;

	for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {

		/* Past the end of memory: leave the entry invalid. */
		if (pfn >= max_low_pfn) {
			pgd_clear(pg_dir);
			continue;
		}

		/* PAGE_SIZE*4: a segment (pmd) table spans four pages --
		 * confirm PTRS_PER_PMD * entry size == 4*PAGE_SIZE. */
		pm_dir = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE*4);
		pgd_populate(&init_mm, pg_dir, pm_dir);

		for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) {
			if (pfn >= max_low_pfn) {
				pmd_clear(pm_dir);
				continue;
			}

			pt_dir = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);

			for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
				pte = pfn_pte(pfn, PAGE_KERNEL);
				if (pfn >= max_low_pfn) {
					pte_clear(&init_mm, 0, &pte);
					continue;
				}
				set_pte(pt_dir, pte);
				pfn++;
			}
		}
	}

	S390_lowcore.kernel_asce = pgdir_k;

	/* enable virtual mapping in kernel mode: load the ASCE into the
	 * primary (CR1), secondary (CR7) and home (CR13) space control
	 * registers, then set the DAT bit via SSM. */
	__asm__ __volatile__("lctlg 1,1,%0\n\t"
			     "lctlg 7,7,%0\n\t"
			     "lctlg 13,13,%0\n\t"
			     "ssm %1"
			     : :"m" (pgdir_k), "m" (ssm_mask));

	local_flush_tlb();

	return;
}
254 | #endif /* CONFIG_ARCH_S390X */ | |
255 | ||
256 | void __init mem_init(void) | |
257 | { | |
258 | unsigned long codesize, reservedpages, datasize, initsize; | |
259 | ||
260 | max_mapnr = num_physpages = max_low_pfn; | |
261 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | |
262 | ||
263 | /* clear the zero-page */ | |
264 | memset(empty_zero_page, 0, PAGE_SIZE); | |
265 | ||
266 | /* this will put all low memory onto the freelists */ | |
267 | totalram_pages += free_all_bootmem(); | |
268 | ||
269 | reservedpages = 0; | |
270 | ||
271 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | |
272 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
273 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
274 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", | |
275 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | |
276 | max_mapnr << (PAGE_SHIFT-10), | |
277 | codesize >> 10, | |
278 | reservedpages << (PAGE_SHIFT-10), | |
279 | datasize >>10, | |
280 | initsize >> 10); | |
281 | } | |
282 | ||
283 | void free_initmem(void) | |
284 | { | |
285 | unsigned long addr; | |
286 | ||
287 | addr = (unsigned long)(&__init_begin); | |
288 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
289 | ClearPageReserved(virt_to_page(addr)); | |
290 | set_page_count(virt_to_page(addr), 1); | |
291 | free_page(addr); | |
292 | totalram_pages++; | |
293 | } | |
294 | printk ("Freeing unused kernel memory: %ldk freed\n", | |
295 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); | |
296 | } | |
297 | ||
298 | #ifdef CONFIG_BLK_DEV_INITRD | |
299 | void free_initrd_mem(unsigned long start, unsigned long end) | |
300 | { | |
301 | if (start < end) | |
302 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | |
303 | for (; start < end; start += PAGE_SIZE) { | |
304 | ClearPageReserved(virt_to_page(start)); | |
305 | set_page_count(virt_to_page(start), 1); | |
306 | free_page(start); | |
307 | totalram_pages++; | |
308 | } | |
309 | } | |
310 | #endif |