// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */


#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>     /* for node_online_map */
#include <linux/pagemap.h>      /* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
extern int data_start;
extern void parisc_kernel_start(void);  /* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

#ifdef CONFIG_DISCONTIGMEM
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif

static struct resource data_resource = {
        .name  = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
        .name  = "PDC data (Page Zero)",
        .start = 0,
        .end   = 0x9ff,
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
int npmem_ranges __read_mostly;

/*
 * get_memblock() allocates pages via memblock.
 * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
 * doesn't allocate from bottom to top which is needed because we only created
 * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
 */
static void * __init get_memblock(unsigned long size)
{
        static phys_addr_t search_addr __initdata;
        phys_addr_t phys;

        if (!search_addr)
                search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
        search_addr = ALIGN(search_addr, size);
        while (!memblock_is_region_memory(search_addr, size) ||
                memblock_is_region_reserved(search_addr, size)) {
                search_addr += size;
        }
        phys = search_addr;

        if (phys)
                memblock_reserve(phys, size);
        else
                panic("get_memblock() failed.\n");

        memset(__va(phys), 0, size);

        return __va(phys);
}
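
/*
 * Example usage (see map_pages() and pagetable_init() below):
 *
 *      pte_t *pg_table = get_memblock(PAGE_SIZE);
 *
 * The block comes back zeroed, reserved in memblock, and aligned to its
 * (power-of-two) size, so it can be installed into the page tables
 * immediately.
 */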

#ifdef CONFIG_64BIT
#define MAX_MEM         (~0UL)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
        char *cp, *end;
        unsigned long limit;

        /* We need this before __setup() functions are called */

        limit = MAX_MEM;
        for (cp = boot_command_line; *cp; ) {
                if (memcmp(cp, "mem=", 4) == 0) {
                        cp += 4;
                        limit = memparse(cp, &end);
                        if (end != cp)
                                break;
                        cp = end;
                } else {
                        while (*cp != ' ' && *cp)
                                ++cp;
                        while (*cp == ' ')
                                ++cp;
                }
        }

        if (limit < mem_limit)
                mem_limit = limit;
}
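
/*
 * Example: booting with "mem=512M" on the kernel command line makes
 * memparse() return 512*1024*1024 here, and setup_bootmem() below then
 * truncates the physical memory ranges to that limit.
 */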

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
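
/*
 * MAX_GAP is 0x40000000 bytes expressed in pages, i.e. 1 GB: with 4 kB
 * pages that is 0x40000 pfns. Ranges separated by more than this are
 * dropped on !CONFIG_DISCONTIGMEM kernels (see below).
 */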
static void __init setup_bootmem(void)
{
        unsigned long mem_max;
#ifndef CONFIG_DISCONTIGMEM
        physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
        int npmem_holes;
#endif
        int i, sysram_resource_count;

        disable_sr_hashing(); /* Turn off space register hashing */

        /*
         * Sort the ranges. Since the number of ranges is typically
         * small, and performance is not an issue here, just do
         * a simple insertion sort.
         */

        for (i = 1; i < npmem_ranges; i++) {
                int j;

                for (j = i; j > 0; j--) {
                        unsigned long tmp;

                        if (pmem_ranges[j-1].start_pfn <
                            pmem_ranges[j].start_pfn) {

                                break;
                        }
                        tmp = pmem_ranges[j-1].start_pfn;
                        pmem_ranges[j-1].start_pfn = pmem_ranges[j].start_pfn;
                        pmem_ranges[j].start_pfn = tmp;
                        tmp = pmem_ranges[j-1].pages;
                        pmem_ranges[j-1].pages = pmem_ranges[j].pages;
                        pmem_ranges[j].pages = tmp;
                }
        }

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Throw out ranges that are too far apart (controlled by
         * MAX_GAP).
         */

        for (i = 1; i < npmem_ranges; i++) {
                if (pmem_ranges[i].start_pfn -
                        (pmem_ranges[i-1].start_pfn +
                         pmem_ranges[i-1].pages) > MAX_GAP) {
                        npmem_ranges = i;
                        printk("Large gap in memory detected (%ld pages). "
                               "Consider turning on CONFIG_DISCONTIGMEM\n",
                               pmem_ranges[i].start_pfn -
                               (pmem_ranges[i-1].start_pfn +
                                pmem_ranges[i-1].pages));
                        break;
                }
        }
#endif

        /* Print the memory ranges */
        pr_info("Memory Ranges:\n");

        for (i = 0; i < npmem_ranges; i++) {
                struct resource *res = &sysram_resources[i];
                unsigned long start;
                unsigned long size;

                size = (pmem_ranges[i].pages << PAGE_SHIFT);
                start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
                pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
                        i, start, start + (size - 1), size >> 20);

                /* request memory resource */
                res->name = "System RAM";
                res->start = start;
                res->end = start + size - 1;
                res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }

        sysram_resource_count = npmem_ranges;

        /*
         * For 32 bit kernels we limit the amount of memory we can
         * support, in order to preserve enough kernel address space
         * for other purposes. For 64 bit kernels we don't normally
         * limit the memory, but this mechanism can be used to
         * artificially limit the amount of memory (and it is written
         * to work with multiple memory ranges).
         */

        mem_limit_func();       /* check for "mem=" argument */

        mem_max = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long rsize;

                rsize = pmem_ranges[i].pages << PAGE_SHIFT;
                if ((mem_max + rsize) > mem_limit) {
                        printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
                        if (mem_max == mem_limit)
                                npmem_ranges = i;
                        else {
                                pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
                                                       - (mem_max >> PAGE_SHIFT);
                                npmem_ranges = i + 1;
                                mem_max = mem_limit;
                        }
                        break;
                }
                mem_max += rsize;
        }

        printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_DISCONTIGMEM
        /* Merge the ranges, keeping track of the holes */

        {
                unsigned long end_pfn;
                unsigned long hole_pages;

                npmem_holes = 0;
                end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
                for (i = 1; i < npmem_ranges; i++) {

                        hole_pages = pmem_ranges[i].start_pfn - end_pfn;
                        if (hole_pages) {
                                pmem_holes[npmem_holes].start_pfn = end_pfn;
                                pmem_holes[npmem_holes++].pages = hole_pages;
                                end_pfn += hole_pages;
                        }
                        end_pfn += pmem_ranges[i].pages;
                }

                pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
                npmem_ranges = 1;
        }
#endif
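
        /*
         * After this merge a flat-memory kernel sees exactly one range
         * covering everything from the first start_pfn to the last end
         * pfn; the holes recorded above are handed to memblock_reserve()
         * further down so the gap pages never reach the page allocator.
         */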

#ifdef CONFIG_DISCONTIGMEM
        for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
                memset(NODE_DATA(i), 0, sizeof(pg_data_t));
        }
        memset(pfnnid_map, 0xff, sizeof(pfnnid_map));

        for (i = 0; i < npmem_ranges; i++) {
                node_set_state(i, N_NORMAL_MEMORY);
                node_set_online(i);
        }
#endif

        /*
         * Initialize and free the full range of memory in each range.
         */

        max_pfn = 0;
        for (i = 0; i < npmem_ranges; i++) {
                unsigned long start_pfn;
                unsigned long npages;
                unsigned long start;
                unsigned long size;

                start_pfn = pmem_ranges[i].start_pfn;
                npages = pmem_ranges[i].pages;

                start = start_pfn << PAGE_SHIFT;
                size = npages << PAGE_SHIFT;

                /* add system RAM memblock */
                memblock_add(start, size);

                if ((start_pfn + npages) > max_pfn)
                        max_pfn = start_pfn + npages;
        }

        /* IOMMU is always used to access "high mem" on those boxes
         * that can support enough mem that a PCI device couldn't
         * directly DMA to any physical addresses.
         * ISA DMA support will need to revisit this.
         */
        max_low_pfn = max_pfn;

        /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

        memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
                                PDC_CONSOLE_IO_IODC_SIZE));
        memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
                        (unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_DISCONTIGMEM

        /* reserve the holes */

        for (i = 0; i < npmem_holes; i++) {
                memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
                                (pmem_holes[i].pages << PAGE_SHIFT));
        }
#endif

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start) {
                printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
                if (__pa(initrd_start) < mem_max) {
                        unsigned long initrd_reserve;

                        if (__pa(initrd_end) > mem_max) {
                                initrd_reserve = mem_max - __pa(initrd_start);
                        } else {
                                initrd_reserve = initrd_end - initrd_start;
                        }
                        initrd_below_start_ok = 1;
                        printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n",
                                __pa(initrd_start),
                                __pa(initrd_start) + initrd_reserve, mem_max);

                        memblock_reserve(__pa(initrd_start), initrd_reserve);
                }
        }
#endif

        data_resource.start = virt_to_phys(&data_start);
        data_resource.end = virt_to_phys(_end) - 1;
        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(&data_start) - 1;

        /* We don't know which region the kernel will be in, so try
         * all of them.
         */
        for (i = 0; i < sysram_resource_count; i++) {
                struct resource *res = &sysram_resources[i];
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
        request_resource(&sysram_resources[0], &pdcdata_resource);

        /* Initialize Page Deallocation Table (PDT) and check for bad memory. */
        pdc_pdt_init();
}

static int __init parisc_text_address(unsigned long vaddr)
{
        static unsigned long head_ptr __initdata;

        if (!head_ptr)
                head_ptr = PAGE_MASK & (unsigned long)
                        dereference_function_descriptor(&parisc_kernel_start);

        return core_kernel_text(vaddr) || vaddr == head_ptr;
}
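
/*
 * map_pages() fills in the kernel page tables for the virtual range
 * [start_vaddr, start_vaddr + size) backed by the physical range
 * starting at start_paddr. With force=0, addresses detected as kernel
 * text via parisc_text_address() get PAGE_KERNEL_EXEC instead of
 * pgprot; force=1 applies pgprot unconditionally (used e.g. when
 * free_initmem() below remaps the init area).
 */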
static void __init map_pages(unsigned long start_vaddr,
                             unsigned long start_paddr, unsigned long size,
                             pgprot_t pgprot, int force)
{
        pgd_t *pg_dir;
        pmd_t *pmd;
        pte_t *pg_table;
        unsigned long end_paddr;
        unsigned long start_pmd;
        unsigned long start_pte;
        unsigned long tmp1;
        unsigned long tmp2;
        unsigned long address;
        unsigned long vaddr;
        unsigned long ro_start;
        unsigned long ro_end;
        unsigned long kernel_end;

        ro_start = __pa((unsigned long)_text);
        ro_end   = __pa((unsigned long)&data_start);
        kernel_end = __pa((unsigned long)&_end);

        end_paddr = start_paddr + size;

        pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
        start_pmd = 0;
#else
        start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
        start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

        address = start_paddr;
        vaddr = start_vaddr;
        while (address < end_paddr) {
#if PTRS_PER_PMD == 1
                pmd = (pmd_t *)__pa(pg_dir);
#else
                pmd = (pmd_t *)pgd_address(*pg_dir);

                /*
                 * pmd is physical at this point
                 */

                if (!pmd) {
                        pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
                        pmd = (pmd_t *) __pa(pmd);
                }

                pgd_populate(NULL, pg_dir, __va(pmd));
#endif
                pg_dir++;

                /* now change pmd to kernel virtual addresses */

                pmd = (pmd_t *)__va(pmd) + start_pmd;
                for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

                        /*
                         * pg_table is physical at this point
                         */

                        pg_table = (pte_t *)pmd_address(*pmd);
                        if (!pg_table) {
                                pg_table = (pte_t *) get_memblock(PAGE_SIZE);
                                pg_table = (pte_t *) __pa(pg_table);
                        }

                        pmd_populate_kernel(NULL, pmd, __va(pg_table));

                        /* now change pg_table to kernel virtual addresses */

                        pg_table = (pte_t *) __va(pg_table) + start_pte;
                        for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
                                pte_t pte;

                                if (force)
                                        pte = __mk_pte(address, pgprot);
                                else if (parisc_text_address(vaddr)) {
                                        pte = __mk_pte(address, PAGE_KERNEL_EXEC);
                                        if (address >= ro_start && address < kernel_end)
                                                pte = pte_mkhuge(pte);
                                }
                                else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
                                if (address >= ro_start && address < ro_end) {
                                        pte = __mk_pte(address, PAGE_KERNEL_EXEC);
                                        pte = pte_mkhuge(pte);
                                } else
#endif
                                {
                                        pte = __mk_pte(address, pgprot);
                                        if (address >= ro_start && address < kernel_end)
                                                pte = pte_mkhuge(pte);
                                }

                                if (address >= end_paddr)
                                        break;

                                set_pte(pg_table, pte);

                                address += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        }
                        start_pte = 0;

                        if (address >= end_paddr)
                                break;
                }
                start_pmd = 0;
        }
}

void __init set_kernel_text_rw(int enable_read_write)
{
        unsigned long start = (unsigned long) __init_begin;
        unsigned long end   = (unsigned long) _etext;

        map_pages(start, __pa(start), end - start,
                PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

        /* force the kernel to see the new page table entries */
        flush_cache_all();
        flush_tlb_all();
}

void __ref free_initmem(void)
{
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;

        /* The init text pages are marked R-X.  We have to
         * flush the icache and mark them RW-
         *
         * This is tricky, because map_pages is in the init section.
         * Do a dummy remap of the data section first (the data
         * section is already PAGE_KERNEL) to pull in the TLB entries
         * for map_pages */
        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
                  PAGE_KERNEL_RWX, 1);
        /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
         * map_pages */
        map_pages(init_begin, __pa(init_begin), init_end - init_begin,
                  PAGE_KERNEL, 1);

        /* force the kernel to see the new TLB entries */
        __flush_tlb_range(0, init_begin, init_end);

        /* finally dump all the instructions which were cached, since the
         * pages are no-longer executable */
        flush_icache_range(init_begin, init_end);

        free_initmem_default(POISON_FREE_INITMEM);

        /* set up a new led state on systems shipped with an LED State panel */
        pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
        /* rodata memory was already mapped with KERNEL_RO access rights by
           pagetable_init() and map_pages(). No need to do additional stuff here */
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - __start_rodata) >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))
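
/*
 * SET_MAP_OFFSET() rounds an address up past a full VM_MAP_OFFSET hole:
 * e.g. with VM_MAP_OFFSET = 0x8000, an input of 0x12345 yields
 * (0x12345 + 0x8000) & ~0x7fff = 0x18000, and an already aligned 0x10000
 * still moves up to 0x18000, so at least 32K always separates the areas.
 */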

void *parisc_vmalloc_start __read_mostly;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __read_mostly;
#endif

void __init mem_init(void)
{
        /* Do sanity checks on IPC (compat) structures */
        BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
        BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
        BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
        BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
        BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
        BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
        BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
        BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

        /* Do sanity checks on page table constants */
        BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
        BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
        BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
        BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
                        > BITS_PER_LONG);

        high_memory = __va((max_pfn << PAGE_SHIFT));
        set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
        memblock_free_all();

#ifdef CONFIG_PA11
        if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
                pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
                parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
                                                + PCXL_DMA_MAP_SIZE);
        } else
#endif
                parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

        mem_init_print_info(NULL);

#if 0
        /*
         * Do not expose the virtual kernel memory layout to userspace.
         * But keep code for debugging purposes.
         */
        printk("virtual kernel memory layout:\n"
               "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
               "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
               "      .init : 0x%px - 0x%px   (%4ld kB)\n"
               "      .data : 0x%px - 0x%px   (%4ld kB)\n"
               "      .text : 0x%px - 0x%px   (%4ld kB)\n",

               (void*)VMALLOC_START, (void*)VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               __va(0), high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               __init_begin, __init_end,
               ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

               _etext, _edata,
               ((unsigned long)_edata - (unsigned long)_etext) >> 10,

               _text, _etext,
               ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __read_mostly;
EXPORT_SYMBOL(empty_zero_page);
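
/*
 * empty_zero_page backs the kernel's shared zero page (see ZERO_PAGE()
 * in <asm/pgtable.h>); it is allocated and zeroed via get_memblock() in
 * pagetable_init() below.
 */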

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
        int range;

        /* Map each physical memory range to its kernel vaddr */

        for (range = 0; range < npmem_ranges; range++) {
                unsigned long start_paddr;
                unsigned long end_paddr;
                unsigned long size;

                start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
                size = pmem_ranges[range].pages << PAGE_SHIFT;
                end_paddr = start_paddr + size;

                map_pages((unsigned long)__va(start_paddr), start_paddr,
                          size, PAGE_KERNEL, 0);
        }

#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_end && initrd_end > mem_limit) {
                printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
                map_pages(initrd_start, __pa(initrd_start),
                          initrd_end - initrd_start, PAGE_KERNEL, 0);
        }
#endif

        empty_zero_page = get_memblock(PAGE_SIZE);
}

static void __init gateway_init(void)
{
        unsigned long linux_gateway_page_addr;
        /* FIXME: This is 'const' in order to trick the compiler
           into not treating it as DP-relative data. */
        extern void * const linux_gateway_page;

        linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

        /*
         * Setup Linux Gateway page.
         *
         * The Linux gateway page will reside in kernel space (on virtual
         * page 0), so it doesn't need to be aliased into user space.
         */

        map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
                  PAGE_SIZE, PAGE_GATEWAY, 1);
}

void __init paging_init(void)
{
        int i;

        setup_bootmem();
        pagetable_init();
        gateway_init();
        flush_cache_all_local(); /* start with known state */
        flush_tlb_all_local(NULL);

        for (i = 0; i < npmem_ranges; i++) {
                unsigned long zones_size[MAX_NR_ZONES] = { 0, };

                zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;

#ifdef CONFIG_DISCONTIGMEM
                /* Need to initialize the pfnnid_map before we can initialize
                   the zone */
                {
                        int j;
                        for (j = (pmem_ranges[i].start_pfn >> PFNNID_SHIFT);
                             j <= ((pmem_ranges[i].start_pfn + pmem_ranges[i].pages) >> PFNNID_SHIFT);
                             j++) {
                                pfnnid_map[j] = i;
                        }
                }
#endif

                free_area_init_node(i, zones_size,
                                    pmem_ranges[i].start_pfn, NULL);
        }
}
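
/*
 * On CONFIG_DISCONTIGMEM kernels, pfnnid_map[] filled in above acts as
 * a coarse pfn-to-node lookup table: each entry covers 2^PFNNID_SHIFT
 * pfns and stores the index of the memory range (= node id) containing
 * them, with 0xff (set in setup_bootmem()) marking pfns on no node.
 */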

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
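
/*
 * SID_ARRAY_SIZE is the number of longs needed for one bit per space id:
 * e.g. on a 64-bit PA20 kernel, 262144 ids / (8 * 8) = 4096 longs.
 */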
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
        unsigned long index;

        spin_lock(&sid_lock);

        if (free_space_ids == 0) {
                if (dirty_space_ids != 0) {
                        spin_unlock(&sid_lock);
                        flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
                        spin_lock(&sid_lock);
                }
                BUG_ON(free_space_ids == 0);
        }

        free_space_ids--;

        index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
        space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
        space_id_index = index;

        spin_unlock(&sid_lock);

        return index << SPACEID_SHIFT;
}
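
/*
 * The returned value is the bitmap index shifted into position, so a
 * space id is always a multiple of 1 << SPACEID_SHIFT; free_sid() below
 * reverses the shift to locate the bit again.
 */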

void free_sid(unsigned long spaceid)
{
        unsigned long index = spaceid >> SPACEID_SHIFT;
        unsigned long *dirty_space_offset;

        dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
        index &= (BITS_PER_LONG - 1);

        spin_lock(&sid_lock);

        BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

        *dirty_space_offset |= (1L << index);
        dirty_space_ids++;

        spin_unlock(&sid_lock);
}


#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        *ndirtyptr = dirty_space_ids;
        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        dirty_array[i] = dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }
                dirty_space_ids = 0;
        }

        return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (ndirty != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_array[i];
                }

                free_space_ids += ndirty;
                space_id_index = 0;
        }
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
        int i;

        /* NOTE: sid_lock must be held upon entry */

        if (dirty_space_ids != 0) {
                for (i = 0; i < SID_ARRAY_SIZE; i++) {
                        space_id[i] ^= dirty_space_id[i];
                        dirty_space_id[i] = 0;
                }

                free_space_ids += dirty_space_ids;
                dirty_space_ids = 0;
                space_id_index = 0;
        }
}
#endif
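
/*
 * Recycling XORs the dirty bitmap into space_id[]: a dirty bit is only
 * ever set while the corresponding allocation bit is 1 (free_sid()
 * BUG()s on double frees), so the XOR clears exactly the released ids.
 */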

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
        int do_recycle;

        __inc_irq_stat(irq_tlb_count);
        do_recycle = 0;
        spin_lock(&sid_lock);
        if (dirty_space_ids > RECYCLE_THRESHOLD) {
                BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
                get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
                recycle_inuse++;
                do_recycle++;
        }
        spin_unlock(&sid_lock);
        on_each_cpu(flush_tlb_all_local, NULL, 1);
        if (do_recycle) {
                spin_lock(&sid_lock);
                recycle_sids(recycle_ndirty, recycle_dirty_array);
                recycle_inuse = 0;
                spin_unlock(&sid_lock);
        }
}
#else
void flush_tlb_all(void)
{
        __inc_irq_stat(irq_tlb_count);
        spin_lock(&sid_lock);
        flush_tlb_all_local(NULL);
        recycle_sids();
        spin_unlock(&sid_lock);
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif