powerpc/mm: Implement STRICT_KERNEL_RWX on PPC32
author: Christophe Leroy <christophe.leroy@c-s.fr>
Wed, 2 Aug 2017 13:51:05 +0000 (15:51 +0200)
committer: Michael Ellerman <mpe@ellerman.id.au>
Tue, 15 Aug 2017 12:55:57 +0000 (22:55 +1000)
This patch implements STRICT_KERNEL_RWX on PPC32.

As for CONFIG_DEBUG_PAGEALLOC, it deactivates BAT and LTLB mappings
in order to allow page protection setup at the level of each page.

As BAT/LTLB mappings are deactivated, there might be a performance
impact.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/Kconfig
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/mm/init_32.c
arch/powerpc/mm/pgtable_32.c

index a2b669729440733a6bec0e340c315f25f13cb9a7..bf6abab46dccb560c8e3e41029653459c259b1fa 100644 (file)
@@ -176,7 +176,7 @@ config PPC
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select ARCH_HAS_STRICT_KERNEL_RWX       if (PPC_BOOK3S_64 && !RELOCATABLE && !HIBERNATION)
+       select ARCH_HAS_STRICT_KERNEL_RWX       if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
        select ARCH_OPTIONAL_KERNEL_RWX         if ARCH_HAS_STRICT_KERNEL_RWX
        select HAVE_CBPF_JIT                    if !PPC64
        select HAVE_CONTEXT_TRACKING            if PPC64
index b1a250560198a7960f5d7bc9f197a8a4fd4c8e6f..882628fa6987cfdf161aa3afee565a67f03765f8 100644 (file)
@@ -8,7 +8,7 @@
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
+#if defined(CONFIG_STRICT_KERNEL_RWX) && !defined(CONFIG_PPC32)
 #define STRICT_ALIGN_SIZE      (1 << 24)
 #else
 #define STRICT_ALIGN_SIZE      PAGE_SIZE
index 8a7c38b8d335d733917962fbf0c2eaad31b982e4..7d5fee1bb116f06c5cb585062188e20d9d0ee661 100644 (file)
@@ -113,6 +113,12 @@ void __init MMU_setup(void)
                __map_without_bats = 1;
                __map_without_ltlbs = 1;
        }
+#ifdef CONFIG_STRICT_KERNEL_RWX
+       if (rodata_enabled) {
+               __map_without_bats = 1;
+               __map_without_ltlbs = 1;
+       }
+#endif
 }
 
 /*
index 85e8f0e0efe69e4ea04e6c0583f3a12915f5c184..4a3dd9fc6989a4a5422f8b38a47badf688308b00 100644 (file)
@@ -34,6 +34,7 @@
 #include <asm/fixmap.h>
 #include <asm/io.h>
 #include <asm/setup.h>
+#include <asm/sections.h>
 
 #include "mmu_decl.h"
 
@@ -375,6 +376,29 @@ void mark_initmem_nx(void)
        change_page_attr(page, numpages, PAGE_KERNEL);
 }
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_rodata_ro(void)
+{
+       struct page *page;
+       unsigned long numpages;
+
+       page = virt_to_page(_stext);
+       numpages = PFN_UP((unsigned long)_etext) -
+                  PFN_DOWN((unsigned long)_stext);
+
+       change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+       /*
+        * mark .rodata as read only. Use __init_begin rather than __end_rodata
+        * to cover NOTES and EXCEPTION_TABLE.
+        */
+       page = virt_to_page(__start_rodata);
+       numpages = PFN_UP((unsigned long)__init_begin) -
+                  PFN_DOWN((unsigned long)__start_rodata);
+
+       change_page_attr(page, numpages, PAGE_KERNEL_RO);
+}
+#endif
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {