powerpc/mm/radix: Add radix callbacks for vmemmap and map_kernel_page()
author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Fri, 29 Apr 2016 13:26:00 +0000 (23:26 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Sun, 1 May 2016 08:33:03 +0000 (18:33 +1000)
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/mm/pgtable-radix.c

index c1b787dcfbb65e12b89d471e0fc3acd0c58d2ca0..9bc2471a82073716ea3d9817c0f9e1f786010c7e 100644 (file)
@@ -725,6 +725,13 @@ void pgtable_cache_init(void);
 static inline int map_kernel_page(unsigned long ea, unsigned long pa,
                                  unsigned long flags)
 {
+       if (radix_enabled()) {
+#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
+               unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
+               WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
+#endif
+               return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+       }
        return hash__map_kernel_page(ea, pa, flags);
 }
 
@@ -732,6 +739,8 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
                                                   unsigned long page_size,
                                                   unsigned long phys)
 {
+       if (radix_enabled())
+               return radix__vmemmap_create_mapping(start, page_size, phys);
        return hash__vmemmap_create_mapping(start, page_size, phys);
 }
 
@@ -739,6 +748,9 @@ static inline int __meminit vmemmap_create_mapping(unsigned long start,
 static inline void vmemmap_remove_mapping(unsigned long start,
                                          unsigned long page_size)
 {
+
+       if (radix_enabled())
+               return radix__vmemmap_remove_mapping(start, page_size);
        return hash__vmemmap_remove_mapping(start, page_size);
 }
 #endif
index a26259fcffd6cc701100004ecd2e6f2eb810f7bd..63eb629a8b64f507fa5ae69f8a4d44cf3d080671 100644 (file)
@@ -130,6 +130,12 @@ static inline int radix__pmd_trans_huge(pmd_t pmd)
 
 #endif
 
+extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
+                                            unsigned long page_size,
+                                            unsigned long phys);
+extern void radix__vmemmap_remove_mapping(unsigned long start,
+                                   unsigned long page_size);
+
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int psz);
 #endif /* __ASSEMBLY__ */
index 4918850e1061e87f820494f293e94409bfe4ac44..0472f8aa40e1e68d4df2c03a3e7f2a0781b18f4b 100644 (file)
@@ -354,3 +354,23 @@ void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
        /* Finally limit subsequent allocations */
        memblock_set_current_limit(first_memblock_base + first_memblock_size);
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit radix__vmemmap_create_mapping(unsigned long start,
+                                     unsigned long page_size,
+                                     unsigned long phys)
+{
+       /* Create a PTE encoding */
+       unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
+
+       BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
+       return 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
+{
+       /* FIXME!! intel does more. We should free page tables mapping vmemmap ? */
+}
+#endif
+#endif