Revert "vfs: Delete the associated dentry when deleting a file"
[linux-2.6-block.git] / arch / parisc / mm / init.c
CommitLineData
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/parisc/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 1999 SuSE GmbH
 *   changed by Philipp Rumpf
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2004 Randolph Chung (tausq@debian.org)
 * Copyright 2006-2007 Helge Deller (deller@gmx.de)
 *
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>
#include <linux/execmem.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
#include <asm/asm-offsets.h>
#include <asm/shmbuf.h>

extern int data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM		(1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

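/*
 * Example: with "mem=512M" on the command line, memparse() below
 * returns 512 MB and mem_limit is lowered to match; setup_bootmem()
 * later truncates the physical memory ranges to fit under this limit.
 */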
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

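/*
 * Collect the firmware-reported physical memory ranges, register them
 * with memblock and the resource tree, and reserve the regions (PDC
 * page zero and console IODC, kernel image, initrd, and any holes)
 * that must not be handed out as free memory.
 */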
static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			swap(pmem_ranges[j-1], pmem_ranges[j]);
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

static bool kernel_set_to_readonly;

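/*
 * Walk (and allocate, where missing) the kernel page tables to map the
 * physical range [start_paddr, start_paddr + size) at start_vaddr.
 * With force=0 the protection is chosen per address (kernel text/data
 * vs. ordinary memory, honouring kernel_set_to_readonly); with force=1
 * the supplied pgprot is used unconditionally.
 */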
static void __ref map_pages(unsigned long start_vaddr,
			    unsigned long start_paddr, unsigned long size,
			    pgprot_t pgprot, int force)
{
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
					     PAGE_SIZE << PMD_TABLE_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif

		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

			pg_table = pte_offset_kernel(pmd, vaddr);
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end   = (unsigned long) &data_start;

	map_pages(start, __pa(start), end - start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end  = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with an LED state panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}


#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	 * pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif


/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

	/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))

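/*
 * Example: VM_MAP_OFFSET is 0x8000, so SET_MAP_OFFSET(0x12345) yields
 * 0x18000.  An already 32K-aligned address is still pushed up by a
 * full 32K (SET_MAP_OFFSET(0x10000) == 0x18000), which is what
 * guarantees the hole between areas.
 */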
void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);
#if CONFIG_PGTABLE_LEVELS == 3
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
#else
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif

#ifdef CONFIG_64BIT
	/* avoid ldil_%L() asm statements to sign-extend into upper 32-bits */
	BUILD_BUG_ON(__PAGE_OFFSET >= 0x80000000);
	BUILD_BUG_ON(TMPALIAS_MAP_START >= 0x80000000);
#endif

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void *)VMALLOC_START, (void *)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */

	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

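/*
 * Pre-allocate the pmd/pte pages that back the fixmap region so that
 * later fixmap users (e.g. kernel text patching) find the page tables
 * already in place.
 */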
static void __init fixmap_init(void)
{
	unsigned long addr = FIXMAP_START;
	unsigned long end = FIXMAP_START + FIXMAP_SIZE;
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd;

	BUILD_BUG_ON(FIXMAP_SIZE > PMD_SIZE);

#if CONFIG_PGTABLE_LEVELS == 3
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
				     PAGE_SIZE << PMD_TABLE_ORDER);
		if (!pmd)
			panic("fixmap: pmd allocation failed.\n");
		pud_populate(NULL, pud, pmd);
	}
#endif

	pmd = pmd_offset(pud, addr);
	do {
		pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!pte)
			panic("fixmap: pte allocation failed.\n");

		pmd_populate_kernel(&init_mm, pmd, pte);

		addr += PAGE_SIZE;
	} while (addr < end);
}

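/* Set up a single memory zone (zone 0) spanning all RAM up to the end
 * of DRAM; the actual pages are released by memblock_free_all(). */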
static void __init parisc_bootmem_free(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	max_zone_pfn[0] = memblock_end_of_DRAM();

	free_area_init(max_zone_pfn);
}

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	fixmap_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	sparse_init();
	parisc_bootmem_free();
}

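/*
 * Cover [start, end) with as few block-TLB entries as possible.  A BTLB
 * entry must be naturally aligned, so the loop below doubles the entry
 * size while "start" is aligned to twice the current size and the range
 * still fits, and halves it when alignment or the remaining length
 * forces a smaller block.  A sketch of the outcome, assuming enough
 * free slots: covering 0x00400000-0x01000000 uses a 4 MB entry at
 * 0x00400000 followed by an 8 MB entry at 0x00800000.
 */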
static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
			unsigned long entry_info)
{
	const int slot_max = btlb_info.fixed_range_info.num_comb;
	int min_num_pages = btlb_info.min_size;
	unsigned long size;

	/* map at minimum 4 pages */
	if (min_num_pages < 4)
		min_num_pages = 4;

	size = HUGEPAGE_SIZE;
	while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
		/* starting address must have same alignment as size! */
		/* if correctly aligned and fits in double size, increase */
		if (((start & (2 * size - 1)) == 0) &&
		    (end - start) >= (2 * size)) {
			size <<= 1;
			continue;
		}
		/* if current size alignment is too big, try smaller size */
		if ((start & (size - 1)) != 0) {
			size >>= 1;
			continue;
		}
		if ((end - start) >= size) {
			if ((size >> PAGE_SHIFT) >= min_num_pages)
				pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
					size >> PAGE_SHIFT, entry_info, *slot);
			(*slot)++;
			start += size;
			continue;
		}
		size /= 2;
		continue;
	}
}

void btlb_init_per_cpu(void)
{
	unsigned long s, t, e;
	int slot;

	/* BTLBs are not available on 64-bit CPUs */
	if (IS_ENABLED(CONFIG_PA20))
		return;
	else if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}

	/* insert BTLBs for code and data segments */
	s = (uintptr_t) dereference_function_descriptor(&_stext);
	e = (uintptr_t) dereference_function_descriptor(&_etext);
	t = (uintptr_t) dereference_function_descriptor(&_sdata);
	BUG_ON(t != e);

	/* code segments */
	slot = 0;
	alloc_btlb(s, e, &slot, 0x13800000);

	/* sanity check */
	t = (uintptr_t) dereference_function_descriptor(&_edata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
	BUG_ON(t != e);

	/* data segments */
	s = (uintptr_t) dereference_function_descriptor(&_sdata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
	alloc_btlb(s, e, &slot, 0x11800000);
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18-bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15-bit protection IDs, so that is the limiting factor.
 * PCXT' has 18-bit protection IDs, but only 16-bit space IDs, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

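/*
 * Example sizing: the bitmaps below hold one bit per space id, i.e.
 * NR_SPACE_IDS / 8 = 32 KB per array on PA20.  Recycling kicks in once
 * more than half of all ids are sitting in the dirty bitmap.
 */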
static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids;

static DEFINE_SPINLOCK(sid_lock);

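/* Allocate a free space id and return it pre-shifted into place
 * (index << SPACEID_SHIFT), i.e. in the form stored in the mm
 * context. */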
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[BIT_WORD(index)] |= BIT_MASK(index);
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset, mask;

	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
	mask = BIT_MASK(index);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */

	*dirty_space_offset |= mask;
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	do_recycle = 0;
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
#else
void flush_tlb_all(void)
{
	spin_lock(&sid_lock);
	__inc_irq_stat(irq_tlb_count);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif

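/*
 * Translate VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations into parisc
 * page protections.  Note the asymmetry for write-only mappings: the
 * private flavour gets PAGE_NONE while the shared one gets
 * PAGE_WRITEONLY.
 */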
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_NONE,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= VMALLOC_START,
				.end	= VMALLOC_END,
				.pgprot	= PAGE_KERNEL_RWX,
				.alignment = 1,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */