/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/nospec-branch.h>

	.text
	.code64
ENTRY(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted mapping
	 *   RSI - virtual address for the decrypted mapping
	 *   RDX - length to encrypt
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physical address of the pagetables to use for encryption
	 */

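	/*
	 * Note: with the standard x86-64 calling convention the first five
	 * integer arguments arrive in RDI, RSI, RDX, RCX and R8, so the
	 * register layout above corresponds to a C declaration roughly like
	 * the following (the argument names here are illustrative, not taken
	 * from this file):
	 *
	 *	void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
	 *				 unsigned long decrypted_kernel_vaddr,
	 *				 unsigned long kernel_len,
	 *				 unsigned long encryption_wa,
	 *				 unsigned long encryption_pgd);
	 */
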
	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12
	movq	%rdi, %r10		/* Encrypted area */
	movq	%rsi, %r11		/* Decrypted area */
	movq	%rdx, %r12		/* Area length */

	/* Copy encryption routine into the workarea */
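	/*
	 * __enc_copy and .L__enc_copy_end bracket the copy routine defined
	 * later in this file, so their difference is its size, known at
	 * assembly time; rep movsb moves that many bytes into the workarea
	 * page.
	 */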
	movq	%rax, %rdi		/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi	/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted area */
	movq	%r11, %rsi		/* Decrypted area */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Area length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

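	/*
	 * The routine was copied into the workarea above, so its address is
	 * only known at run time and must be reached with an indirect call;
	 * ANNOTATE_RETPOLINE_SAFE marks that indirect call as intentionally
	 * exempt from objtool's retpoline checking.
	 */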
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	ret
ENDPROC(sme_encrypt_execute)

ENTRY(__enc_copy)
/*
 * Routine used to encrypt memory in place.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted mapping
 *     RSI - virtual address for the decrypted mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of area
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The area will be encrypted by copying from the non-encrypted
 * memory space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted memory space. The physical
 * addresses of the two mappings are the same which results in the area
 * being encrypted "in place".
 */
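/*
 * In rough C terms, the chunked copy loop below behaves like the sketch
 * here: the first memcpy reads through the decrypted mapping into the
 * intermediate buffer, the second writes it back through the encrypted
 * mapping (chunk, copy_buf, decrypted, encrypted and len are illustrative
 * names, not symbols from this file):
 *
 *	while (len) {
 *		chunk = min(len, PMD_PAGE_SIZE);
 *		memcpy(copy_buf, decrypted, chunk);
 *		memcpy(encrypted, copy_buf, chunk);
 *		decrypted += chunk;
 *		encrypted += chunk;
 *		len -= chunk;
 *	}
 */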
	/* Enable the new page tables */
	mov	%rdx, %cr3

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
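	/*
	 * Clearing and then setting CR4.PGE above invalidates all TLB
	 * entries, including global ones that the CR3 write alone would
	 * have left in place, so no stale translations from the previous
	 * page tables survive.
	 */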

	push	%r15
	push	%r12

	movq	%rcx, %r9		/* Save area length */
	movq	%rdi, %r10		/* Save encrypted area address */
	movq	%rsi, %r11		/* Save decrypted area address */

	/* Set the PAT register PA5 entry to write-protect */
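	/*
	 * The IA32_PAT MSR holds eight one-byte memory-type fields; PA4-PA7
	 * sit in the upper 32 bits, which rdmsr returns in EDX, so PA5 is
	 * bits 15:8 of EDX. The mask below clears that byte and 0x05, the
	 * Write-Protect memory type, is OR-ed back in.
	 */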
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%rdx, %r15		/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr

	wbinvd				/* Invalidate any cache entries */

	/* Copy/encrypt up to 2MB at a time */
	movq	$PMD_PAGE_SIZE, %r12
1:
	cmpq	%r12, %r9
	jnb	2f
	movq	%r9, %r12

2:
	movq	%r11, %rsi		/* Source - decrypted area */
	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
	movq	%r12, %rcx
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest - encrypted area */
	movq	%r12, %rcx
	rep	movsb

	addq	%r12, %r11
	addq	%r12, %r10
	subq	%r12, %r9		/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */

	/* Restore PAT register */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	mov	%r15, %rdx		/* Restore original PAT value */
	wrmsr

	pop	%r12
	pop	%r15

	ret
.L__enc_copy_end:
ENDPROC(__enc_copy)