/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
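
/*
 * A worked example of the arithmetic above, assuming the __VCPU_REGS_*
 * indices from asm/kvm_vcpu_regs.h (RAX = 0, RCX = 1, ...): on a 64-bit
 * build WORD_SIZE = 64 / 8 = 8, so VCPU_RCX = 1 * 8 = 8, i.e. guest RCX
 * lives at byte offset 8 into the register array passed in via @regs.
 */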

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 * @regs:	unsigned long * (to guest registers)
 */
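/*
 * For reference, the C-side prototype is (a sketch; the authoritative
 * declaration lives in svm.h):
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 */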
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX
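
	/*
	 * The pushes above preserve the callee-saved GPRs (RBX, RBP and
	 * R12-R15 on 64-bit; EBX, ESI, EDI and EBP on 32-bit), all of
	 * which are loaded with guest values below.
	 */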

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1
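
	/*
	 * Stack, from the top: @vmcb, @regs, then the callee-saved GPRs
	 * pushed above.  @vmcb is popped into RAX just before VMRUN, and
	 * @regs is popped back immediately after VM-Exit.
	 */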

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX
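	/*
	 * If VMRUN faults, the exception table entry at the bottom of the
	 * function redirects execution to label 3, which checks
	 * kvm_rebooting and either resumes at label 2 or hits ud2.
	 */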

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
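
	/*
	 * Note: FILL_RETURN_BUFFER clobbers its scratch register.  %_ASM_AX
	 * is safe to use here because the hardware-restored host RAX (the
	 * VMCB address) is dead at this point; @regs is popped into RAX next.
	 */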
92 | /* "POP" @regs to RAX. */ |
93 | pop %_ASM_AX | |
94 | ||
95 | /* Save all guest registers. */ | |
96 | mov %_ASM_CX, VCPU_RCX(%_ASM_AX) | |
97 | mov %_ASM_DX, VCPU_RDX(%_ASM_AX) | |
98 | mov %_ASM_BX, VCPU_RBX(%_ASM_AX) | |
99 | mov %_ASM_BP, VCPU_RBP(%_ASM_AX) | |
100 | mov %_ASM_SI, VCPU_RSI(%_ASM_AX) | |
101 | mov %_ASM_DI, VCPU_RDI(%_ASM_AX) | |
102 | #ifdef CONFIG_X86_64 | |
103 | mov %r8, VCPU_R8 (%_ASM_AX) | |
104 | mov %r9, VCPU_R9 (%_ASM_AX) | |
105 | mov %r10, VCPU_R10(%_ASM_AX) | |
106 | mov %r11, VCPU_R11(%_ASM_AX) | |
107 | mov %r12, VCPU_R12(%_ASM_AX) | |
108 | mov %r13, VCPU_R13(%_ASM_AX) | |
109 | mov %r14, VCPU_R14(%_ASM_AX) | |
110 | mov %r15, VCPU_R15(%_ASM_AX) | |
111 | #endif | |
112 | ||

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
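
	/*
	 * Note: the 32-bit forms above (%ecx, %r8d, ...) are deliberate;
	 * writing a 32-bit register zero-extends into the full 64-bit
	 * register, so these XORs clear the whole GPR with a shorter
	 * encoding.
	 */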

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long
 */
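/*
 * Note the lack of @regs and of the GPR load/save sequences relative to
 * __svm_vcpu_run: for SEV-ES guests, general purpose register state is
 * kept in the encrypted VMSA and is switched by hardware as part of
 * VMRUN/#VMEXIT.
 */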
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Move @vmcb to RAX. */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)