arm64: reduce ID map to a single page
author     Ard Biesheuvel <ard.biesheuvel@linaro.org>
Mon, 1 Jun 2015 11:40:33 +0000 (13:40 +0200)
committer  Catalin Marinas <catalin.marinas@arm.com>
Tue, 2 Jun 2015 16:44:51 +0000 (17:44 +0100)
Commit ea8c2e112445 ("arm64: Extend the idmap to the whole kernel
image") changed the early page table code so that the entire kernel
Image is covered by the identity map. This allows functions that
need to enable or disable the MMU to reside anywhere in the kernel
Image.

However, this change has the unfortunate side effect that the Image
cannot cross a physical 512 MB alignment boundary anymore, since the
early page table code cannot deal with the Image crossing a /virtual/
512 MB alignment boundary.

So instead, reduce the ID map to a single page that is populated by
the contents of the .idmap.text section. Only three functions reside
there at the moment: __enable_mmu(), cpu_resume_mmu() and cpu_reset().
If new code is introduced that needs to manipulate the MMU state, it
should be added to this section as well.
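
As an illustration (the helper below is hypothetical and not part of this
commit), such code would be wrapped in the section the same way the sleep.S
hunk in this patch does it:

        .pushsection    ".idmap.text", "ax"
ENTRY(my_mmu_off_helper)
        msr     sctlr_el1, x0           // install caller-provided SCTLR_EL1 value
        isb                             // synchronise the MMU state change
        ret
ENDPROC(my_mmu_off_helper)
        .popsection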

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/kernel/head.S
arch/arm64/kernel/sleep.S
arch/arm64/kernel/vmlinux.lds.S

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 30cffc5e74020f4d59c9c3e03c26785329674329..c0ff3ce4299e979d11692540a76da4ec5ade5bba 100644
@@ -361,7 +361,7 @@ __create_page_tables:
         * Create the identity mapping.
         */
        mov     x0, x25                         // idmap_pg_dir
-       adrp    x3, KERNEL_START                // __pa(KERNEL_START)
+       adrp    x3, __idmap_text_start          // __pa(__idmap_text_start)
 
 #ifndef CONFIG_ARM64_VA_BITS_48
 #define EXTRA_SHIFT    (PGDIR_SHIFT + PAGE_SHIFT - 3)
@@ -384,11 +384,11 @@ __create_page_tables:
 
        /*
         * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
-        * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used),
+        * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
         * this number conveniently equals the number of leading zeroes in
-        * the physical address of KERNEL_END.
+        * the physical address of __idmap_text_end.
         */
-       adrp    x5, KERNEL_END
+       adrp    x5, __idmap_text_end
        clz     x5, x5
        cmp     x5, TCR_T0SZ(VA_BITS)   // default T0SZ small enough?
        b.ge    1f                      // .. then skip additional level
@@ -403,8 +403,8 @@ __create_page_tables:
 #endif
 
        create_pgd_entry x0, x3, x5, x6
-       mov     x5, x3                          // __pa(KERNEL_START)
-       adr_l   x6, KERNEL_END                  // __pa(KERNEL_END)
+       mov     x5, x3                          // __pa(__idmap_text_start)
+       adr_l   x6, __idmap_text_end            // __pa(__idmap_text_end)
        create_block_map x0, x7, x3, x5, x6
 
        /*
@@ -632,6 +632,7 @@ ENDPROC(__secondary_switched)
  *
  * other registers depend on the function called upon completion
  */
+       .section        ".idmap.text", "ax"
 __enable_mmu:
        ldr     x5, =vectors
        msr     vbar_el1, x5
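
To make the T0SZ check in the hunks above concrete (the address is assumed for
illustration, not taken from this commit): with a 39-bit VA configuration the
default T0SZ is 64 - 39 = 25. If __idmap_text_end ended up at physical address
0x0000008010000000, which needs 40 bits, clz would return 24 leading zeroes;
24 >= 25 is false, so the b.ge branch is not taken and the early code falls
through to set up the additional translation level instead of skipping it,
allowing the ID map to reach that address.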
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index ede186cdd4520169fd2ea9259d4738b8796bf45d..811e61a2d8472e5de53e7081fb3261bd01c8ec49 100644
@@ -130,12 +130,14 @@ ENDPROC(__cpu_suspend_enter)
 /*
  * x0 must contain the sctlr value retrieved from restored context
  */
+       .pushsection    ".idmap.text", "ax"
 ENTRY(cpu_resume_mmu)
        ldr     x3, =cpu_resume_after_mmu
        msr     sctlr_el1, x0           // restore sctlr_el1
        isb
        br      x3                      // global jump to virtual address
 ENDPROC(cpu_resume_mmu)
+       .popsection
 cpu_resume_after_mmu:
        mov     x0, #0                  // return zero on success
        ldp     x19, x20, [sp, #16]
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index a2c29865c3fe54e2d3c8c7ce1b956ba5ce5c0bf7..98073332e2d05b62c12be5c9f4172ddcc3363736 100644
@@ -38,6 +38,12 @@ jiffies = jiffies_64;
        *(.hyp.text)                                    \
        VMLINUX_SYMBOL(__hyp_text_end) = .;
 
+#define IDMAP_TEXT                                     \
+       . = ALIGN(SZ_4K);                               \
+       VMLINUX_SYMBOL(__idmap_text_start) = .;         \
+       *(.idmap.text)                                  \
+       VMLINUX_SYMBOL(__idmap_text_end) = .;
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -95,6 +101,7 @@ SECTIONS
                        SCHED_TEXT
                        LOCK_TEXT
                        HYPERVISOR_TEXT
+                       IDMAP_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
@@ -167,11 +174,13 @@ SECTIONS
 }
 
 /*
- * The HYP init code can't be more than a page long,
+ * The HYP init code and ID map text can't be longer than a page each,
  * and should not cross a page boundary.
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "HYP init code too big or misaligned")
+ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
+       "ID map text too big or misaligned")
 
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
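
As a worked example of the new assertion (addresses assumed for illustration):
IDMAP_TEXT aligns __idmap_text_start to a 4 KB boundary, say
0xffffffc000095000. If the section were 0xc00 bytes long, the check would
evaluate 0x95c00 - 0x95000 = 0xc00 <= SZ_4K and pass, whereas a section
spilling past 0x96000 would trip the "ID map text too big or misaligned"
link-time error.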