2 * Page table handling routines for radix page table.
4 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 #define pr_fmt(fmt) "radix-mmu: " fmt
14 #include <linux/kernel.h>
15 #include <linux/sched/mm.h>
16 #include <linux/memblock.h>
17 #include <linux/of_fdt.h>
19 #include <linux/string_helpers.h>
20 #include <linux/stop_machine.h>
22 #include <asm/pgtable.h>
23 #include <asm/pgalloc.h>
24 #include <asm/mmu_context.h>
26 #include <asm/machdep.h>
28 #include <asm/firmware.h>
29 #include <asm/powernv.h>
30 #include <asm/sections.h>
31 #include <asm/trace.h>
33 #include <trace/events/thp.h>
35 unsigned int mmu_pid_bits;
36 unsigned int mmu_base_pid;
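/*
 * Native (hypervisor mode) hook used to point partition table entry 0 at
 * the process table: dword0 is preserved, dword1 gets the process table
 * base, its size encoding and PATB_GR (process translation is radix).
 */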
38 static int native_register_process_table(unsigned long base, unsigned long pg_sz,
39 unsigned long table_size)
41 unsigned long patb0, patb1;
43 patb0 = be64_to_cpu(partition_tb[0].patb0);
44 patb1 = base | table_size | PATB_GR;
46 mmu_partition_table_set_entry(0, patb0, patb1);
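/*
 * Boot-time page table allocation straight from memblock, used before the
 * slab allocator is up.  Preference order: an explicit physical region
 * hint, then a NUMA node hint, then anywhere in memory.
 */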
51 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
52 unsigned long region_start, unsigned long region_end)
57 if (region_start || region_end) /* has region hint */
58 pa = memblock_alloc_range(size, size, region_start, region_end,
60 else if (nid != -1) /* has node hint */
61 pa = memblock_alloc_base_nid(size, size,
62 MEMBLOCK_ALLOC_ANYWHERE,
66 pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);
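/*
 * Map one kernel page at boot: walk (and, where missing, allocate from
 * memblock) the page table levels by hand, then install the PTE at the
 * level matching map_page_size (PUD for 1G, PMD for 2M, else base pages).
 */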
76 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
78 unsigned int map_page_size,
80 unsigned long region_start, unsigned long region_end)
82 unsigned long pfn = pa >> PAGE_SHIFT;
88 pgdp = pgd_offset_k(ea);
89 if (pgd_none(*pgdp)) {
90 pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
91 region_start, region_end);
92 pgd_populate(&init_mm, pgdp, pudp);
94 pudp = pud_offset(pgdp, ea);
95 if (map_page_size == PUD_SIZE) {
99 if (pud_none(*pudp)) {
100 pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
101 region_start, region_end);
102 pud_populate(&init_mm, pudp, pmdp);
104 pmdp = pmd_offset(pudp, ea);
105 if (map_page_size == PMD_SIZE) {
106 ptep = pmdp_ptep(pmdp);
109 if (!pmd_present(*pmdp)) {
110 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
111 region_start, region_end);
112 pmd_populate_kernel(&init_mm, pmdp, ptep);
114 ptep = pte_offset_kernel(pmdp, ea);
117 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
123 * nid, region_start, and region_end are hints to try to place the page
124 * table memory in the same node or region.
126 static int __map_kernel_page(unsigned long ea, unsigned long pa,
128 unsigned int map_page_size,
130 unsigned long region_start, unsigned long region_end)
132 unsigned long pfn = pa >> PAGE_SHIFT;
138 * Make sure task size is correct as per the max address
140 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
142 if (unlikely(!slab_is_available()))
143 return early_map_kernel_page(ea, pa, flags, map_page_size,
144 nid, region_start, region_end);
147 * Should make page table allocation functions be able to take a
148 * node, so we can place kernel page tables on the right nodes after
151 pgdp = pgd_offset_k(ea);
152 pudp = pud_alloc(&init_mm, pgdp, ea);
155 if (map_page_size == PUD_SIZE) {
156 ptep = (pte_t *)pudp;
159 pmdp = pmd_alloc(&init_mm, pudp, ea);
162 if (map_page_size == PMD_SIZE) {
163 ptep = pmdp_ptep(pmdp);
166 ptep = pte_alloc_kernel(pmdp, ea);
171 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
176 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
178 unsigned int map_page_size)
180 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
183 #ifdef CONFIG_STRICT_KERNEL_RWX
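/*
 * Walk an existing kernel mapping and clear the requested protection bits
 * (e.g. _PAGE_WRITE for mark_rodata_ro(), _PAGE_EXEC for mark_initmem_nx())
 * on every entry covering the range, then flush the TLB for that range.
 */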
184 void radix__change_memory_range(unsigned long start, unsigned long end,
193 start = ALIGN_DOWN(start, PAGE_SIZE);
194 end = PAGE_ALIGN(end); // aligns up
196 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
199 for (idx = start; idx < end; idx += PAGE_SIZE) {
200 pgdp = pgd_offset_k(idx);
201 pudp = pud_alloc(&init_mm, pgdp, idx);
204 if (pud_huge(*pudp)) {
205 ptep = (pte_t *)pudp;
208 pmdp = pmd_alloc(&init_mm, pudp, idx);
211 if (pmd_huge(*pmdp)) {
212 ptep = pmdp_ptep(pmdp);
215 ptep = pte_alloc_kernel(pmdp, idx);
219 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
222 radix__flush_tlb_kernel_range(start, end);
225 void radix__mark_rodata_ro(void)
227 unsigned long start, end;
230 * mark_rodata_ro() will mark itself as !writable at some point.
231 * Due to the DD1 workaround in radix__pte_update(), we'll end up with
232 * an invalid pte and the system will crash quite severely.
234 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
235 pr_warn("Warning: Unable to mark rodata read only on P9 DD1\n");
239 start = (unsigned long)_stext;
240 end = (unsigned long)__init_begin;
242 radix__change_memory_range(start, end, _PAGE_WRITE);
245 void radix__mark_initmem_nx(void)
247 unsigned long start = (unsigned long)__init_begin;
248 unsigned long end = (unsigned long)__init_end;
250 radix__change_memory_range(start, end, _PAGE_EXEC);
252 #endif /* CONFIG_STRICT_KERNEL_RWX */
254 static inline void __meminit print_mapping(unsigned long start,
263 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
265 pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
268 static int __meminit create_physical_mapping(unsigned long start,
272 unsigned long vaddr, addr, mapping_size = 0;
274 unsigned long max_mapping_size;
275 #ifdef CONFIG_STRICT_KERNEL_RWX
276 int split_text_mapping = 1;
278 int split_text_mapping = 0;
281 start = _ALIGN_UP(start, PAGE_SIZE);
282 for (addr = start; addr < end; addr += mapping_size) {
283 unsigned long gap, previous_size;
287 previous_size = mapping_size;
288 max_mapping_size = PUD_SIZE;
291 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
292 mmu_psize_defs[MMU_PAGE_1G].shift &&
293 PUD_SIZE <= max_mapping_size)
294 mapping_size = PUD_SIZE;
295 else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
296 mmu_psize_defs[MMU_PAGE_2M].shift)
297 mapping_size = PMD_SIZE;
299 mapping_size = PAGE_SIZE;
301 if (split_text_mapping && (mapping_size == PUD_SIZE) &&
302 (addr <= __pa_symbol(__init_begin)) &&
303 (addr + mapping_size) >= __pa_symbol(_stext)) {
304 max_mapping_size = PMD_SIZE;
308 if (split_text_mapping && (mapping_size == PMD_SIZE) &&
309 (addr <= __pa_symbol(__init_begin)) &&
310 (addr + mapping_size) >= __pa_symbol(_stext))
311 mapping_size = PAGE_SIZE;
313 if (mapping_size != previous_size) {
314 print_mapping(start, addr, previous_size);
318 vaddr = (unsigned long)__va(addr);
320 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
321 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
322 prot = PAGE_KERNEL_X;
326 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
331 print_mapping(start, addr, mapping_size);
335 void __init radix_init_pgtable(void)
337 unsigned long rts_field;
338 struct memblock_region *reg;
340 /* We don't support slb for radix */
343 * Create the linear mapping, using standard page size for now
345 for_each_memblock(memory, reg) {
347 * The memblock allocator is up at this point, so the
348 * page tables will be allocated within the range. No
349 * need for a node (which we don't have yet).
351 WARN_ON(create_physical_mapping(reg->base,
352 reg->base + reg->size,
356 /* Find out how many PID bits are supported */
357 if (cpu_has_feature(CPU_FTR_HVMODE)) {
360 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
362 * When KVM is possible, we only use the top half of the
363 * PID space to avoid collisions between host and guest PIDs
364 * which can cause problems due to prefetch when exiting the
367 mmu_base_pid = 1 << (mmu_pid_bits - 1);
372 /* The guest uses the bottom half of the PID space */
379 * Allocate Partition table and process table for the
382 BUG_ON(PRTB_SIZE_SHIFT > 36);
383 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
385 * Fill in the process table.
387 rts_field = radix__get_tree_size();
388 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
390 * Fill in the partition table. We are supposed to use the effective
391 * address of the process table here, but our linear mapping also
392 * enables us to use the physical address.
394 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
395 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
396 asm volatile("ptesync" : : : "memory");
397 asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
398 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
399 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
400 trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
403 * The init_mm context is given the first available (non-zero) PID,
404 * which is the "guard PID" and contains no page table. PIDR should
405 * never be set to zero because that duplicates the kernel address
406 * space at the 0x0... offset (quadrant 0)!
408 * An arbitrary PID that may later be allocated by the PID allocator
409 * for userspace processes must not be used either, because that
410 * would cause stale user mappings for that PID on CPUs outside of
411 * the TLB invalidation scheme (because it won't be in mm_cpumask).
413 * So permanently carve out one PID for the purpose of a guard PID.
415 init_mm.context.id = mmu_base_pid;
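/*
 * Set up partition table entry 0 for the host: dword0 carries the radix
 * tree size, the kernel PGD and PATB_HR (host translation is radix).
 */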
419 static void __init radix_init_partition_table(void)
421 unsigned long rts_field, dw0;
423 mmu_partition_table_init();
424 rts_field = radix__get_tree_size();
425 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
426 mmu_partition_table_set_entry(0, dw0, 0);
428 pr_info("Initializing Radix MMU\n");
429 pr_info("Partition table %p\n", partition_tb);
432 void __init radix_init_native(void)
434 register_process_table = native_register_process_table;
437 static int __init get_idx_from_shift(unsigned int shift)
458 static int __init radix_dt_scan_page_sizes(unsigned long node,
459 const char *uname, int depth,
466 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
468 /* We are scanning "cpu" nodes only */
469 if (type == NULL || strcmp(type, "cpu") != 0)
472 /* Find MMU PID size */
473 prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
474 if (prop && size == 4)
475 mmu_pid_bits = be32_to_cpup(prop);
477 /* Grab page size encodings */
478 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
482 pr_info("Page sizes from device-tree:\n");
483 for (; size >= 4; size -= 4, ++prop) {
485 struct mmu_psize_def *def;
487 /* top 3 bits are the AP encoding */
488 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
489 ap = be32_to_cpu(prop[0]) >> 29;
490 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
492 idx = get_idx_from_shift(shift);
496 def = &mmu_psize_defs[idx];
502 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
506 void __init radix__early_init_devtree(void)
511 * Try to find the available page sizes in the device-tree
513 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
514 if (rc != 0) /* Found */
517 * let's assume we have 4K and 64K page support
519 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
520 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
522 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
523 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
525 #ifdef CONFIG_SPARSEMEM_VMEMMAP
526 if (mmu_psize_defs[MMU_PAGE_2M].shift) {
528 * map vmemmap using 2M if available
530 mmu_vmemmap_psize = MMU_PAGE_2M;
532 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
536 static void update_hid_for_radix(void)
539 unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */
541 asm volatile("ptesync": : :"memory");
542 /* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
543 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
544 : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
545 /* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
546 asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
547 : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
548 asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
549 trace_tlbie(0, 0, rb, 0, 2, 0, 1);
550 trace_tlbie(0, 0, rb, 0, 2, 1, 1);
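	/*
	 * With the stale translations flushed, enable radix by setting the
	 * radix mode bit in HID0 and spin until the update takes effect.
	 */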
555 hid0 = mfspr(SPRN_HID0);
556 hid0 |= HID0_POWER9_RADIX;
557 mtspr(SPRN_HID0, hid0);
558 asm volatile("isync": : :"memory");
560 /* Wait for it to happen */
561 while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
565 static void radix_init_amor(void)
568 * In HV mode, we init AMOR (Authority Mask Override Register) so that
569 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
570 * Register), enable key 0 and set it to 1.
572 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
574 mtspr(SPRN_AMOR, (3ul << 62));
577 static void radix_init_iamr(void)
582 * The IAMR should be set to 0 on DD1.
584 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
590 * Radix always uses key0 of the IAMR to determine if an access is
591 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
594 mtspr(SPRN_IAMR, iamr);
597 void __init radix__early_init_mmu(void)
601 #ifdef CONFIG_PPC_64K_PAGES
602 /* PAGE_SIZE mappings */
603 mmu_virtual_psize = MMU_PAGE_64K;
605 mmu_virtual_psize = MMU_PAGE_4K;
608 #ifdef CONFIG_SPARSEMEM_VMEMMAP
609 /* vmemmap mapping */
610 mmu_vmemmap_psize = mmu_virtual_psize;
613 * initialize page table size
615 __pte_index_size = RADIX_PTE_INDEX_SIZE;
616 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
617 __pud_index_size = RADIX_PUD_INDEX_SIZE;
618 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
619 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
620 __pmd_cache_index = RADIX_PMD_INDEX_SIZE;
621 __pte_table_size = RADIX_PTE_TABLE_SIZE;
622 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
623 __pud_table_size = RADIX_PUD_TABLE_SIZE;
624 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
626 __pmd_val_bits = RADIX_PMD_VAL_BITS;
627 __pud_val_bits = RADIX_PUD_VAL_BITS;
628 __pgd_val_bits = RADIX_PGD_VAL_BITS;
630 __kernel_virt_start = RADIX_KERN_VIRT_START;
631 __kernel_virt_size = RADIX_KERN_VIRT_SIZE;
632 __vmalloc_start = RADIX_VMALLOC_START;
633 __vmalloc_end = RADIX_VMALLOC_END;
634 __kernel_io_start = RADIX_KERN_IO_START;
635 vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
636 ioremap_bot = IOREMAP_BASE;
639 pci_io_base = ISA_IO_BASE;
643 * For now radix also uses the same frag size
645 __pte_frag_nr = H_PTE_FRAG_NR;
646 __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;
648 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
650 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
651 update_hid_for_radix();
652 lpcr = mfspr(SPRN_LPCR);
653 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
654 radix_init_partition_table();
657 radix_init_pseries();
660 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
663 radix_init_pgtable();
664 /* Switch to the guard PID before turning on MMU */
665 radix__switch_mmu_context(NULL, &init_mm);
666 if (cpu_has_feature(CPU_FTR_HVMODE))
670 void radix__early_init_mmu_secondary(void)
674 * update partition table control register and UPRT
676 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
678 if (cpu_has_feature(CPU_FTR_POWER9_DD1))
679 update_hid_for_radix();
681 lpcr = mfspr(SPRN_LPCR);
682 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
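	/*
	 * Secondaries point the partition table control register (PTCR) at
	 * the table built by the boot CPU; the size field is encoded as
	 * log2(table size in bytes) - 12.
	 */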
685 __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
690 radix__switch_mmu_context(NULL, &init_mm);
691 if (cpu_has_feature(CPU_FTR_HVMODE))
695 void radix__mmu_cleanup_all(void)
699 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
700 lpcr = mfspr(SPRN_LPCR);
701 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
703 powernv_set_nmmu_ptcr(0);
704 radix__flush_tlb_all();
708 void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
709 phys_addr_t first_memblock_size)
711 /* We don't currently support the first MEMBLOCK not mapping
712 * physical address 0 on these processors
714 BUG_ON(first_memblock_base != 0);
717 * Radix mode is not limited by RMA / VRMA addressing.
719 ppc64_rma_size = ULONG_MAX;
722 #ifdef CONFIG_MEMORY_HOTPLUG
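/*
 * Page table teardown helpers for memory hot-remove: a PTE or PMD page is
 * handed back to the allocator only if it no longer contains any entries.
 */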
723 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
728 for (i = 0; i < PTRS_PER_PTE; i++) {
734 pte_free_kernel(&init_mm, pte_start);
738 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
743 for (i = 0; i < PTRS_PER_PMD; i++) {
749 pmd_free(&init_mm, pmd_start);
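/*
 * When a hot-removed range covers only part of a huge mapping, the huge
 * entry is cleared and the portions of the naturally aligned region that
 * are not being removed are mapped again.  This runs under stop_machine()
 * so no other CPU walks the mapping while it is being switched.
 */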
753 struct change_mapping_params {
757 unsigned long aligned_start;
758 unsigned long aligned_end;
761 static int __meminit stop_machine_change_mapping(void *data)
763 struct change_mapping_params *params =
764 (struct change_mapping_params *)data;
769 spin_unlock(&init_mm.page_table_lock);
770 pte_clear(&init_mm, params->aligned_start, params->pte);
771 create_physical_mapping(params->aligned_start, params->start, -1);
772 create_physical_mapping(params->end, params->aligned_end, -1);
773 spin_lock(&init_mm.page_table_lock);
777 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
783 pte = pte_start + pte_index(addr);
784 for (; addr < end; addr = next, pte++) {
785 next = (addr + PAGE_SIZE) & PAGE_MASK;
789 if (!pte_present(*pte))
792 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
794 * The vmemmap_free() and remove_section_mapping()
795 * codepaths call us with aligned addresses.
797 WARN_ONCE(1, "%s: unaligned range\n", __func__);
801 pte_clear(&init_mm, addr, pte);
806 * Helper to clear the pte and potentially split the mapping
808 static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
809 unsigned long size, pte_t *pte)
811 unsigned long mask = ~(size - 1);
812 unsigned long aligned_start = addr & mask;
813 unsigned long aligned_end = addr + size;
814 struct change_mapping_params params;
815 bool split_region = false;
817 if ((end - addr) < size) {
819 * We're going to clear the PTE, but we have not flushed
820 * the mapping yet, so it is time to remap and flush. If the
821 * effects are visible outside the processor, or
822 * if we are running in code close to the
823 * mapping we cleared, we are in trouble.
825 if (overlaps_kernel_text(aligned_start, addr) ||
826 overlaps_kernel_text(end, aligned_end)) {
828 * Hack, just return, don't pte_clear
830 WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
831 "text, not splitting\n", addr, end);
841 params.aligned_start = addr & ~(size - 1);
842 params.aligned_end = min_t(unsigned long, aligned_end,
843 (unsigned long)__va(memblock_end_of_DRAM()));
844 stop_machine(stop_machine_change_mapping, &params, NULL);
848 pte_clear(&init_mm, addr, pte);
851 static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
858 pmd = pmd_start + pmd_index(addr);
859 for (; addr < end; addr = next, pmd++) {
860 next = pmd_addr_end(addr, end);
862 if (!pmd_present(*pmd))
865 if (pmd_huge(*pmd)) {
866 split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
870 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
871 remove_pte_table(pte_base, addr, next);
872 free_pte_table(pte_base, pmd);
876 static void remove_pud_table(pud_t *pud_start, unsigned long addr,
883 pud = pud_start + pud_index(addr);
884 for (; addr < end; addr = next, pud++) {
885 next = pud_addr_end(addr, end);
887 if (!pud_present(*pud))
890 if (pud_huge(*pud)) {
891 split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
895 pmd_base = (pmd_t *)pud_page_vaddr(*pud);
896 remove_pmd_table(pmd_base, addr, next);
897 free_pmd_table(pmd_base, pud);
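/*
 * Tear down the kernel mapping for [start, end): walk the tree top-down,
 * unmapping the range and freeing page table pages that become empty,
 * then flush the TLB once for the whole range.
 */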
901 static void __meminit remove_pagetable(unsigned long start, unsigned long end)
903 unsigned long addr, next;
907 spin_lock(&init_mm.page_table_lock);
909 for (addr = start; addr < end; addr = next) {
910 next = pgd_addr_end(addr, end);
912 pgd = pgd_offset_k(addr);
913 if (!pgd_present(*pgd))
916 if (pgd_huge(*pgd)) {
917 split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
921 pud_base = (pud_t *)pgd_page_vaddr(*pgd);
922 remove_pud_table(pud_base, addr, next);
925 spin_unlock(&init_mm.page_table_lock);
926 radix__flush_tlb_kernel_range(start, end);
929 int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
931 return create_physical_mapping(start, end, nid);
934 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
936 remove_pagetable(start, end);
939 #endif /* CONFIG_MEMORY_HOTPLUG */
941 #ifdef CONFIG_SPARSEMEM_VMEMMAP
942 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
943 pgprot_t flags, unsigned int map_page_size,
946 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
949 int __meminit radix__vmemmap_create_mapping(unsigned long start,
950 unsigned long page_size,
953 /* Create a PTE encoding */
954 unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
955 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
958 ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
964 #ifdef CONFIG_MEMORY_HOTPLUG
965 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
967 remove_pagetable(start, start + page_size);
972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
974 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
975 pmd_t *pmdp, unsigned long clr,
980 #ifdef CONFIG_DEBUG_VM
981 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
982 assert_spin_locked(&mm->page_table_lock);
985 old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
986 trace_hugepage_update(addr, old, clr, set);
991 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
997 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
998 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
999 VM_BUG_ON(pmd_devmap(*pmdp));
1001 * khugepaged calls this for normal pmd
1006 /*FIXME!! Verify whether we need this kick below */
1007 serialize_against_pte_lookup(vma->vm_mm);
1009 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1015 * For us pgtable_t is pte_t *. In order to save the deposited
1016 * page table, we consider the allocated page table as a list
1017 * head. On withdraw we need to make sure we zero out the used
1018 * list_head memory area.
1020 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1023 struct list_head *lh = (struct list_head *) pgtable;
1025 assert_spin_locked(pmd_lockptr(mm, pmdp));
1028 if (!pmd_huge_pte(mm, pmdp))
1031 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1032 pmd_huge_pte(mm, pmdp) = pgtable;
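/*
 * The deposited page tables form a LIFO list: withdraw returns the most
 * recently deposited page table first.
 */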
1035 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1039 struct list_head *lh;
1041 assert_spin_locked(pmd_lockptr(mm, pmdp));
1044 pgtable = pmd_huge_pte(mm, pmdp);
1045 lh = (struct list_head *) pgtable;
1047 pmd_huge_pte(mm, pmdp) = NULL;
1049 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1052 ptep = (pte_t *) pgtable;
1060 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1061 unsigned long addr, pmd_t *pmdp)
1066 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1067 old_pmd = __pmd(old);
1069 * Serialize against find_current_mm_pte which does lock-less
1070 * lookup in page tables with local interrupts disabled. For huge pages
1071 * it casts pmd_t to pte_t. Since the format of pte_t differs from
1072 * pmd_t, we want to prevent a pmd from changing from a page table
1073 * pointer to a huge page (and back) while interrupts are disabled.
1074 * We clear the pmd so that it may be replaced with a page table
1075 * pointer in different code paths. So make sure we wait for the parallel
1076 * find_current_mm_pte to finish.
1078 serialize_against_pte_lookup(mm);
1082 int radix__has_transparent_hugepage(void)
1084 /* For radix, 2M at the PMD level means THP */
1085 if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
1089 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */