/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif

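/*
 * Note: KVM_MAGIC_PAGE sits at the very top of the address space and is
 * reached via (offs)(0) addressing below.  The fields accessed with
 * LL64/STL64 are 64 bits wide, which is presumably why the 32-bit
 * variants touch offs + 4: the low word of a big-endian u64.
 */
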
#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
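
/*
 * The templates below clobber only r30, r31 and CR, which is why
 * SCRATCH_SAVE parks exactly those in the magic page's scratch slots.
 * While shared->critical == r1 the hypervisor presumably refrains from
 * injecting interrupts, so the save/restore pair brackets every
 * template.
 */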

.global kvm_template_start
kvm_template_start:

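/*
 * Everything between kvm_template_start and kvm_template_end appears to
 * be template code: it gets copied and patched at runtime (see
 * arch/powerpc/kernel/kvm.c), with the *_reg slots rewritten to use the
 * real source register, the *_orig_ins slots replaced by the original
 * instruction, and the trailing "b ." fixed up to branch back behind
 * the patch site.  The start/end markers also let the patcher avoid
 * touching the templates themselves.
 */
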
.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
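
/*
 * The *_offs/*_len values exported here are instruction offsets, not
 * byte offsets -- hence the division by 4.  The patching code uses them
 * to locate the fixup sites within its copy of each template.
 */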


#define MSR_SAFE_BITS	(MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS	~MSR_SAFE_BITS

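/*
 * The mtmsrd template above only has to track EE and RI (presumably the
 * L=1 form, which architecturally changes nothing else).  A full mtmsr
 * can flip any bit, so the write may be satisfied from the magic page
 * alone only when none of the critical bits change; otherwise the
 * template below falls back to a real mtmsr.
 */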
.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
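
/*
 * Two register slots are exported above because the source GPR of the
 * original mtmsr is needed twice: once at kvm_emulate_mtmsr_reg1 for
 * the xor diff, and again at kvm_emulate_mtmsr_reg2 after r30 has been
 * clobbered by the critical-bits test.
 */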

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
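
/*
 * As flagged above, the wrtee template also backs wrteei 1: setting EE
 * from an immediate acts like wrtee from a register whose EE bit is
 * set, so presumably only the reg slot is patched differently.
 * wrteei 0 gets its own, simpler template below -- clearing EE can
 * never unmask a pending interrupt, so neither the interrupt check nor
 * an orig_ins slot is needed there.
 */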

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

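/*
 * mtsrin writes a segment register, so this template is Book3S
 * material.  The code below suggests: with translation off (MSR IR/DR
 * clear) the new value is merely parked in shared->sr[] -- reg1 turns
 * the top four bits of the index register into a scaled word offset,
 * reg2 stores the value register -- while with translation on, the
 * original mtsrin runs via the orig_ins slot so the change presumably
 * takes effect immediately.
 */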
.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30, r0, 6, 26, 29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end: