powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
[linux-2.6-block.git] arch/powerpc/mm/pgtable-radix.c
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>
#include <asm/uaccess.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

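/*
 * Native (bare metal) path for pointing partition table entry 0 at the
 * host process table; patb0 is preserved as set up earlier.
 */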
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

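/*
 * Boot-time page table allocator: returns zeroed memory from memblock,
 * optionally constrained to a NUMA node and a physical address range.
 */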
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
					unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

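/*
 * Early variant of the mapping code, used while the slab allocator is
 * not yet up: page table levels are carved out of memblock instead.
 */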
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max address
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
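/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE for rodata, _PAGE_EXEC for
 * initmem) over [start, end) of the kernel mapping, then flush the TLB.
 */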
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

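/*
 * Map a physical range into the linear mapping, stepping down from 1G
 * to 2M to base page size as dictated by alignment, the distance to the
 * next boundary, and the page sizes the MMU supports. Kernel text and
 * the interrupt vectors get executable protection.
 */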
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = PAGE_KERNEL;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

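/*
 * Boot-time radix setup: build the linear mapping, carve up the PID
 * space between host and guests, and allocate and register the host
 * process table. Also reserves the guard PID for init_mm (see below).
 */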
void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * allows us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

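/*
 * Translate a page size shift from the device tree (0xc = 4K up to
 * 0x1e = 1G) into an mmu_psize_defs index, or -1 if unsupported.
 */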
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

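/*
 * Each 4-byte cell of "ibm,processor-radix-AP-encodings" carries the AP
 * value in the top 3 bits and the page size shift in the low bits,
 * e.g. 0xa0000010 -> AP = 0x5, shift = 16 (64K pages).
 */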
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid)
		pr_info("Activating Kernel Userspace Execution Prevention\n");

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid) {
		pr_info("Activating Kernel Userspace Access Prevention\n");
		cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
	}

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
	mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
	isync();
}
#endif

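/*
 * Boot CPU setup: pick page sizes, point the generic Book3S page table
 * geometry and kernel address space layout variables at their radix
 * values, set up the partition/process tables, and switch to the guard
 * PID before the MMU is turned on.
 */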
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
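/*
 * A page table page is freed (and the upper level entry cleared) only
 * once every slot in it is empty.
 */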
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

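/*
 * Runs under stop_machine(): clear the huge PTE, then remap the parts
 * of the huge page that lie outside the removed range with smaller
 * pages, dropping init_mm.page_table_lock around the remap.
 */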
static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
		(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(params->aligned_start, params->start, -1);
	create_physical_mapping(params->end, params->aligned_end, -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * Clear the pte and potentially split the mapping (helper).
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
				unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE, but we have not yet
		 * flushed the mapping, so it is time to remap and
		 * flush. If the effects are visible outside the
		 * processor, or if we are running in code close to
		 * the mapping we cleared, we are in trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
			overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

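/*
 * Tear down one level of the kernel page table over [addr, end),
 * splitting huge mappings that only partially overlap the range.
 */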
static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

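/*
 * Unmap [start, end) from the kernel page tables, freeing emptied
 * table pages, then flush the kernel TLB range.
 */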
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

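/*
 * Update a huge PMD with pte_update(): radix uses the same entry format
 * at every level, so a PMD-level hugepage can be updated like a PTE.
 */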
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
				  pmd_t *pmdp, unsigned long clr,
				  unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/* FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

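/*
 * Relax access permissions on a PTE. If a coprocessor (NMMU) may be
 * attached to this mm, go through an invalid PTE plus a flush so the
 * NMMU never sees the intermediate state.
 */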
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}