x86/mm/pti: Add infrastructure for page table isolation
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 640aafebdc00e460687a796f5011c1ab917a4ab9..3d3389a92c339558e8a41ac08c98cf514b9a3584 100644
@@ -1,5 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/jump_label.h>
 #include <asm/unwind_hints.h>
+#include <asm/cpufeatures.h>
+#include <asm/page_types.h>
 
 /*
 
@@ -141,56 +144,25 @@ For 32-bit we have the following conventions - kernel is built with
        UNWIND_HINT_REGS offset=\offset
        .endm
 
-       .macro RESTORE_EXTRA_REGS offset=0
-       movq 0*8+\offset(%rsp), %r15
-       movq 1*8+\offset(%rsp), %r14
-       movq 2*8+\offset(%rsp), %r13
-       movq 3*8+\offset(%rsp), %r12
-       movq 4*8+\offset(%rsp), %rbp
-       movq 5*8+\offset(%rsp), %rbx
-       UNWIND_HINT_REGS offset=\offset extra=0
+       .macro POP_EXTRA_REGS
+       popq %r15
+       popq %r14
+       popq %r13
+       popq %r12
+       popq %rbp
+       popq %rbx
        .endm
 
-       .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
-       .if \rstor_r11
-       movq 6*8(%rsp), %r11
-       .endif
-       .if \rstor_r8910
-       movq 7*8(%rsp), %r10
-       movq 8*8(%rsp), %r9
-       movq 9*8(%rsp), %r8
-       .endif
-       .if \rstor_rax
-       movq 10*8(%rsp), %rax
-       .endif
-       .if \rstor_rcx
-       movq 11*8(%rsp), %rcx
-       .endif
-       .if \rstor_rdx
-       movq 12*8(%rsp), %rdx
-       .endif
-       movq 13*8(%rsp), %rsi
-       movq 14*8(%rsp), %rdi
-       UNWIND_HINT_IRET_REGS offset=16*8
-       .endm
-       .macro RESTORE_C_REGS
-       RESTORE_C_REGS_HELPER 1,1,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RAX
-       RESTORE_C_REGS_HELPER 0,1,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RCX
-       RESTORE_C_REGS_HELPER 1,0,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_R11
-       RESTORE_C_REGS_HELPER 1,1,0,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RCX_R11
-       RESTORE_C_REGS_HELPER 1,0,0,1,1
-       .endm
-
-       .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
-       subq $-(15*8+\addskip), %rsp
+       .macro POP_C_REGS
+       popq %r11
+       popq %r10
+       popq %r9
+       popq %r8
+       popq %rax
+       popq %rcx
+       popq %rdx
+       popq %rsi
+       popq %rdi
        .endm
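+
+       /*
+        * Note: the pop order mirrors the layout of struct pt_regs on
+        * the stack (%r15 lives at the lowest address, %rdi at the
+        * highest), so callers restore with POP_EXTRA_REGS first and
+        * POP_C_REGS second.
+        */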
 
        .macro icebp
@@ -217,6 +189,77 @@ For 32-bit we have the following conventions - kernel is built with
 #endif
 .endm
 
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+
+/* PAGE_TABLE_ISOLATION PGDs are 8k.  Flip bit 12 to switch between the two halves: */
+#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
+
+.macro ADJUST_KERNEL_CR3 reg:req
+       /* Clear the PAGE_TABLE_ISOLATION bit, point CR3 at the kernel page tables: */
+       andq    $(~PTI_SWITCH_MASK), \reg
+.endm
+
+.macro ADJUST_USER_CR3 reg:req
+       /* Move CR3 up a page to the user page tables: */
+       orq     $(PTI_SWITCH_MASK), \reg
+.endm
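+
+/*
+ * Worked example (illustrative physical addresses): the 8k PGD
+ * allocation is naturally aligned, so the kernel half always has
+ * bit 12 clear.  With the kernel PGD page at 0x12000, the user PGD
+ * sits one page up at 0x13000.  ANDing CR3 with ~PTI_SWITCH_MASK
+ * clears bit 12 and lands on the kernel half; ORing with
+ * PTI_SWITCH_MASK sets bit 12 and lands on the user half.
+ */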
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       mov     %cr3, \scratch_reg
+       ADJUST_KERNEL_CR3 \scratch_reg
+       mov     \scratch_reg, %cr3
+.Lend_\@:
+.endm
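+
+/*
+ * About the ALTERNATIVE above: on CPUs where X86_FEATURE_PTI is not
+ * set, the "jmp .Lend_\@" stays in place and the CR3 accesses are
+ * skipped entirely; when PTI is enabled, boot-time patching replaces
+ * the jump with NOPs so the switch actually runs.
+ */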
+
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       mov     %cr3, \scratch_reg
+       ADJUST_USER_CR3 \scratch_reg
+       mov     \scratch_reg, %cr3
+.Lend_\@:
+.endm
+
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+       ALTERNATIVE "jmp .Ldone_\@", "", X86_FEATURE_PTI
+       movq    %cr3, \scratch_reg
+       movq    \scratch_reg, \save_reg
+       /*
+        * Is the switch bit zero?  That means we are already on the
+        * kernel page tables and the CR3 write would be redundant.
+        */
+       testq   $(PTI_SWITCH_MASK), \scratch_reg
+       jz      .Ldone_\@
+
+       ADJUST_KERNEL_CR3 \scratch_reg
+       movq    \scratch_reg, %cr3
+
+.Ldone_\@:
+.endm
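+
+/*
+ * SAVE_AND_SWITCH_TO_KERNEL_CR3 and RESTORE_CR3 are meant to be used
+ * as a pair by entry paths (e.g. paranoid entries) that can be reached
+ * with either the kernel or the user CR3 live and must leave with
+ * exactly the CR3 value they found.
+ */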
+
+.macro RESTORE_CR3 save_reg:req
+       ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
+       /*
+        * The CR3 write could be avoided when not changing its value,
+        * but would require a CR3 read *and* a scratch register.
+        */
+       movq    \save_reg, %cr3
+.Lend_\@:
+.endm
+
+#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
+
+.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
+.endm
+.macro SWITCH_TO_USER_CR3 scratch_reg:req
+.endm
+.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
+.endm
+.macro RESTORE_CR3 save_reg:req
+.endm
+
+#endif /* CONFIG_PAGE_TABLE_ISOLATION */
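+
+/*
+ * Sketch of intended use (register choices are illustrative; the
+ * actual call sites live in the entry code):
+ *
+ *	SWITCH_TO_USER_CR3 scratch_reg=%rdi
+ *	SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+ *	RESTORE_CR3 save_reg=%r14
+ */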
+
 #endif /* CONFIG_X86_64 */
 
 /*