Commit | Line | Data |
---|---|---|
bbf45ba5 HB |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License, version 2, as | |
4 | * published by the Free Software Foundation. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, | |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
9 | * GNU General Public License for more details. | |
10 | * | |
11 | * You should have received a copy of the GNU General Public License | |
12 | * along with this program; if not, write to the Free Software | |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
14 | * | |
15 | * Copyright IBM Corp. 2007 | |
4cd35f67 | 16 | * Copyright 2010-2011 Freescale Semiconductor, Inc. |
bbf45ba5 HB |
17 | * |
18 | * Authors: Hollis Blanchard <hollisb@us.ibm.com> | |
19 | * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> | |
d30f6e48 SW |
20 | * Scott Wood <scottwood@freescale.com> |
21 | * Varun Sethi <varun.sethi@freescale.com> | |
bbf45ba5 HB |
22 | */ |
23 | ||
24 | #include <linux/errno.h> | |
25 | #include <linux/err.h> | |
26 | #include <linux/kvm_host.h> | |
5a0e3ad6 | 27 | #include <linux/gfp.h> |
bbf45ba5 HB |
28 | #include <linux/module.h> |
29 | #include <linux/vmalloc.h> | |
30 | #include <linux/fs.h> | |
7924bd41 | 31 | |
bbf45ba5 HB |
32 | #include <asm/cputable.h> |
33 | #include <asm/uaccess.h> | |
34 | #include <asm/kvm_ppc.h> | |
d9fbd03d | 35 | #include <asm/cacheflush.h> |
d30f6e48 SW |
36 | #include <asm/dbell.h> |
37 | #include <asm/hw_irq.h> | |
38 | #include <asm/irq.h> | |
b50df19c | 39 | #include <asm/time.h> |
bbf45ba5 | 40 | |
d30f6e48 | 41 | #include "timing.h" |
75f74f0d | 42 | #include "booke.h" |
97c95059 | 43 | #include "trace.h" |
bbf45ba5 | 44 | |
d9fbd03d HB |
45 | unsigned long kvmppc_booke_handlers; |
46 | ||
bbf45ba5 HB |
47 | #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM |
48 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | |
49 | ||
50 | struct kvm_stats_debugfs_item debugfs_entries[] = { | |
bbf45ba5 HB |
51 | { "mmio", VCPU_STAT(mmio_exits) }, |
52 | { "dcr", VCPU_STAT(dcr_exits) }, | |
53 | { "sig", VCPU_STAT(signal_exits) }, | |
bbf45ba5 HB |
54 | { "itlb_r", VCPU_STAT(itlb_real_miss_exits) }, |
55 | { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) }, | |
56 | { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) }, | |
57 | { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) }, | |
58 | { "sysc", VCPU_STAT(syscall_exits) }, | |
59 | { "isi", VCPU_STAT(isi_exits) }, | |
60 | { "dsi", VCPU_STAT(dsi_exits) }, | |
61 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, | |
62 | { "dec", VCPU_STAT(dec_exits) }, | |
63 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | |
45c5eb67 | 64 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, |
d30f6e48 SW |
65 | { "doorbell", VCPU_STAT(dbell_exits) }, |
66 | { "guest doorbell", VCPU_STAT(gdbell_exits) }, | |
cf1c5ca4 | 67 | { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, |
bbf45ba5 HB |
68 | { NULL } |
69 | }; | |
70 | ||
bbf45ba5 HB |
71 | /* TODO: use vcpu_printf() */ |
72 | void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |
73 | { | |
74 | int i; | |
75 | ||
666e7252 | 76 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); |
5cf8ca22 | 77 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
de7906c3 AG |
78 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
79 | vcpu->arch.shared->srr1); | |
bbf45ba5 HB |
80 | |
81 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | |
82 | ||
83 | for (i = 0; i < 32; i += 4) { | |
5cf8ca22 | 84 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
8e5b26b5 AG |
85 | kvmppc_get_gpr(vcpu, i), |
86 | kvmppc_get_gpr(vcpu, i+1), | |
87 | kvmppc_get_gpr(vcpu, i+2), | |
88 | kvmppc_get_gpr(vcpu, i+3)); | |
bbf45ba5 HB |
89 | } |
90 | } | |
91 | ||
4cd35f67 SW |
92 | #ifdef CONFIG_SPE |
93 | void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu) | |
94 | { | |
95 | preempt_disable(); | |
96 | enable_kernel_spe(); | |
97 | kvmppc_save_guest_spe(vcpu); | |
98 | vcpu->arch.shadow_msr &= ~MSR_SPE; | |
99 | preempt_enable(); | |
100 | } | |
101 | ||
102 | static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu) | |
103 | { | |
104 | preempt_disable(); | |
105 | enable_kernel_spe(); | |
106 | kvmppc_load_guest_spe(vcpu); | |
107 | vcpu->arch.shadow_msr |= MSR_SPE; | |
108 | preempt_enable(); | |
109 | } | |
110 | ||
111 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) | |
112 | { | |
113 | if (vcpu->arch.shared->msr & MSR_SPE) { | |
114 | if (!(vcpu->arch.shadow_msr & MSR_SPE)) | |
115 | kvmppc_vcpu_enable_spe(vcpu); | |
116 | } else if (vcpu->arch.shadow_msr & MSR_SPE) { | |
117 | kvmppc_vcpu_disable_spe(vcpu); | |
118 | } | |
119 | } | |
120 | #else | |
121 | static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu) | |
122 | { | |
123 | } | |
124 | #endif | |
125 | ||
7a08c274 AG |
126 | static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu) |
127 | { | |
128 | #if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV) | |
129 | /* We always treat the FP bit as enabled from the host | |
130 | perspective, so only need to adjust the shadow MSR */ | |
131 | vcpu->arch.shadow_msr &= ~MSR_FP; | |
132 | vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP; | |
133 | #endif | |
134 | } | |
135 | ||
dd9ebf1f LY |
136 | /* |
137 | * Helper function for "full" MSR writes. No need to call this if only | |
138 | * EE/CE/ME/DE/RI are changing. | |
139 | */ | |
4cd35f67 SW |
140 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) |
141 | { | |
dd9ebf1f | 142 | u32 old_msr = vcpu->arch.shared->msr; |
4cd35f67 | 143 | |
d30f6e48 SW |
144 | #ifdef CONFIG_KVM_BOOKE_HV |
145 | new_msr |= MSR_GS; | |
146 | #endif | |
147 | ||
4cd35f67 SW |
148 | vcpu->arch.shared->msr = new_msr; |
149 | ||
dd9ebf1f | 150 | kvmppc_mmu_msr_notify(vcpu, old_msr); |
4cd35f67 | 151 | kvmppc_vcpu_sync_spe(vcpu); |
7a08c274 | 152 | kvmppc_vcpu_sync_fpu(vcpu); |
4cd35f67 SW |
153 | } |
154 | ||
d4cf3892 HB |
155 | static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, |
156 | unsigned int priority) | |
9dd921cf | 157 | { |
6346046c | 158 | trace_kvm_booke_queue_irqprio(vcpu, priority); |
9dd921cf HB |
159 | set_bit(priority, &vcpu->arch.pending_exceptions); |
160 | } | |
161 | ||
daf5e271 LY |
162 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
163 | ulong dear_flags, ulong esr_flags) | |
9dd921cf | 164 | { |
daf5e271 LY |
165 | vcpu->arch.queued_dear = dear_flags; |
166 | vcpu->arch.queued_esr = esr_flags; | |
167 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | |
168 | } | |
169 | ||
170 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | |
171 | ulong dear_flags, ulong esr_flags) | |
172 | { | |
173 | vcpu->arch.queued_dear = dear_flags; | |
174 | vcpu->arch.queued_esr = esr_flags; | |
175 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | |
176 | } | |
177 | ||
178 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | |
179 | ulong esr_flags) | |
180 | { | |
181 | vcpu->arch.queued_esr = esr_flags; | |
182 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | |
183 | } | |
184 | ||
011da899 AG |
185 | static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags, |
186 | ulong esr_flags) | |
187 | { | |
188 | vcpu->arch.queued_dear = dear_flags; | |
189 | vcpu->arch.queued_esr = esr_flags; | |
190 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT); | |
191 | } | |
192 | ||
daf5e271 LY |
193 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) |
194 | { | |
195 | vcpu->arch.queued_esr = esr_flags; | |
d4cf3892 | 196 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
9dd921cf HB |
197 | } |
198 | ||
199 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) | |
200 | { | |
d4cf3892 | 201 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER); |
9dd921cf HB |
202 | } |
203 | ||
204 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | |
205 | { | |
d4cf3892 | 206 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
9dd921cf HB |
207 | } |
208 | ||
7706664d AG |
209 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) |
210 | { | |
211 | clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | |
212 | } | |
213 | ||
9dd921cf HB |
214 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
215 | struct kvm_interrupt *irq) | |
216 | { | |
c5335f17 AG |
217 | unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; |
218 | ||
219 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | |
220 | prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; | |
221 | ||
222 | kvmppc_booke_queue_irqprio(vcpu, prio); | |
9dd921cf HB |
223 | } |
224 | ||
4fe27d2a | 225 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu) |
4496f974 AG |
226 | { |
227 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); | |
c5335f17 | 228 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); |
4496f974 AG |
229 | } |
230 | ||
f61c94bb BB |
231 | static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu) |
232 | { | |
233 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG); | |
234 | } | |
235 | ||
236 | static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu) | |
237 | { | |
238 | clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions); | |
239 | } | |
240 | ||
d30f6e48 SW |
241 | static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) |
242 | { | |
243 | #ifdef CONFIG_KVM_BOOKE_HV | |
244 | mtspr(SPRN_GSRR0, srr0); | |
245 | mtspr(SPRN_GSRR1, srr1); | |
246 | #else | |
247 | vcpu->arch.shared->srr0 = srr0; | |
248 | vcpu->arch.shared->srr1 = srr1; | |
249 | #endif | |
250 | } | |
251 | ||
252 | static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | |
253 | { | |
254 | vcpu->arch.csrr0 = srr0; | |
255 | vcpu->arch.csrr1 = srr1; | |
256 | } | |
257 | ||
258 | static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | |
259 | { | |
260 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) { | |
261 | vcpu->arch.dsrr0 = srr0; | |
262 | vcpu->arch.dsrr1 = srr1; | |
263 | } else { | |
264 | set_guest_csrr(vcpu, srr0, srr1); | |
265 | } | |
266 | } | |
267 | ||
268 | static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1) | |
269 | { | |
270 | vcpu->arch.mcsrr0 = srr0; | |
271 | vcpu->arch.mcsrr1 = srr1; | |
272 | } | |
273 | ||
274 | static unsigned long get_guest_dear(struct kvm_vcpu *vcpu) | |
275 | { | |
276 | #ifdef CONFIG_KVM_BOOKE_HV | |
277 | return mfspr(SPRN_GDEAR); | |
278 | #else | |
279 | return vcpu->arch.shared->dar; | |
280 | #endif | |
281 | } | |
282 | ||
283 | static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear) | |
284 | { | |
285 | #ifdef CONFIG_KVM_BOOKE_HV | |
286 | mtspr(SPRN_GDEAR, dear); | |
287 | #else | |
288 | vcpu->arch.shared->dar = dear; | |
289 | #endif | |
290 | } | |
291 | ||
292 | static unsigned long get_guest_esr(struct kvm_vcpu *vcpu) | |
293 | { | |
294 | #ifdef CONFIG_KVM_BOOKE_HV | |
295 | return mfspr(SPRN_GESR); | |
296 | #else | |
297 | return vcpu->arch.shared->esr; | |
298 | #endif | |
299 | } | |
300 | ||
301 | static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr) | |
302 | { | |
303 | #ifdef CONFIG_KVM_BOOKE_HV | |
304 | mtspr(SPRN_GESR, esr); | |
305 | #else | |
306 | vcpu->arch.shared->esr = esr; | |
307 | #endif | |
308 | } | |
309 | ||
324b3e63 AG |
310 | static unsigned long get_guest_epr(struct kvm_vcpu *vcpu) |
311 | { | |
312 | #ifdef CONFIG_KVM_BOOKE_HV | |
313 | return mfspr(SPRN_GEPR); | |
314 | #else | |
315 | return vcpu->arch.epr; | |
316 | #endif | |
317 | } | |
318 | ||
d4cf3892 HB |
319 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
320 | static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |
321 | unsigned int priority) | |
bbf45ba5 | 322 | { |
d4cf3892 | 323 | int allowed = 0; |
79300f8c | 324 | ulong msr_mask = 0; |
1c810636 | 325 | bool update_esr = false, update_dear = false, update_epr = false; |
5c6cedf4 AG |
326 | ulong crit_raw = vcpu->arch.shared->critical; |
327 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | |
328 | bool crit; | |
c5335f17 | 329 | bool keep_irq = false; |
d30f6e48 | 330 | enum int_class int_class; |
95e90b43 | 331 | ulong new_msr = vcpu->arch.shared->msr; |
5c6cedf4 AG |
332 | |
333 | /* Truncate crit indicators in 32 bit mode */ | |
334 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | |
335 | crit_raw &= 0xffffffff; | |
336 | crit_r1 &= 0xffffffff; | |
337 | } | |
338 | ||
339 | /* Critical section when crit == r1 */ | |
340 | crit = (crit_raw == crit_r1); | |
341 | /* ... and we're in supervisor mode */ | |
342 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | |
d4cf3892 | 343 | |
c5335f17 AG |
344 | if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { |
345 | priority = BOOKE_IRQPRIO_EXTERNAL; | |
346 | keep_irq = true; | |
347 | } | |
348 | ||
1c810636 AG |
349 | if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled) |
350 | update_epr = true; | |
351 | ||
d4cf3892 | 352 | switch (priority) { |
d4cf3892 | 353 | case BOOKE_IRQPRIO_DTLB_MISS: |
d4cf3892 | 354 | case BOOKE_IRQPRIO_DATA_STORAGE: |
011da899 | 355 | case BOOKE_IRQPRIO_ALIGNMENT: |
daf5e271 LY |
356 | update_dear = true; |
357 | /* fall through */ | |
d4cf3892 | 358 | case BOOKE_IRQPRIO_INST_STORAGE: |
daf5e271 LY |
359 | case BOOKE_IRQPRIO_PROGRAM: |
360 | update_esr = true; | |
361 | /* fall through */ | |
362 | case BOOKE_IRQPRIO_ITLB_MISS: | |
363 | case BOOKE_IRQPRIO_SYSCALL: | |
d4cf3892 | 364 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
bb3a8a17 HB |
365 | case BOOKE_IRQPRIO_SPE_UNAVAIL: |
366 | case BOOKE_IRQPRIO_SPE_FP_DATA: | |
367 | case BOOKE_IRQPRIO_SPE_FP_ROUND: | |
d4cf3892 | 368 | case BOOKE_IRQPRIO_AP_UNAVAIL: |
d4cf3892 | 369 | allowed = 1; |
79300f8c | 370 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
d30f6e48 | 371 | int_class = INT_CLASS_NONCRIT; |
bbf45ba5 | 372 | break; |
f61c94bb | 373 | case BOOKE_IRQPRIO_WATCHDOG: |
d4cf3892 | 374 | case BOOKE_IRQPRIO_CRITICAL: |
4ab96919 | 375 | case BOOKE_IRQPRIO_DBELL_CRIT: |
666e7252 | 376 | allowed = vcpu->arch.shared->msr & MSR_CE; |
d30f6e48 | 377 | allowed = allowed && !crit; |
79300f8c | 378 | msr_mask = MSR_ME; |
d30f6e48 | 379 | int_class = INT_CLASS_CRIT; |
bbf45ba5 | 380 | break; |
d4cf3892 | 381 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
666e7252 | 382 | allowed = vcpu->arch.shared->msr & MSR_ME; |
d30f6e48 | 383 | allowed = allowed && !crit; |
d30f6e48 | 384 | int_class = INT_CLASS_MC; |
bbf45ba5 | 385 | break; |
d4cf3892 HB |
386 | case BOOKE_IRQPRIO_DECREMENTER: |
387 | case BOOKE_IRQPRIO_FIT: | |
dfd4d47e SW |
388 | keep_irq = true; |
389 | /* fall through */ | |
390 | case BOOKE_IRQPRIO_EXTERNAL: | |
4ab96919 | 391 | case BOOKE_IRQPRIO_DBELL: |
666e7252 | 392 | allowed = vcpu->arch.shared->msr & MSR_EE; |
5c6cedf4 | 393 | allowed = allowed && !crit; |
79300f8c | 394 | msr_mask = MSR_CE | MSR_ME | MSR_DE; |
d30f6e48 | 395 | int_class = INT_CLASS_NONCRIT; |
bbf45ba5 | 396 | break; |
d4cf3892 | 397 | case BOOKE_IRQPRIO_DEBUG: |
666e7252 | 398 | allowed = vcpu->arch.shared->msr & MSR_DE; |
d30f6e48 | 399 | allowed = allowed && !crit; |
79300f8c | 400 | msr_mask = MSR_ME; |
d30f6e48 | 401 | int_class = INT_CLASS_CRIT; |
bbf45ba5 | 402 | break; |
bbf45ba5 HB |
403 | } |
404 | ||
d4cf3892 | 405 | if (allowed) { |
d30f6e48 SW |
406 | switch (int_class) { |
407 | case INT_CLASS_NONCRIT: | |
408 | set_guest_srr(vcpu, vcpu->arch.pc, | |
409 | vcpu->arch.shared->msr); | |
410 | break; | |
411 | case INT_CLASS_CRIT: | |
412 | set_guest_csrr(vcpu, vcpu->arch.pc, | |
413 | vcpu->arch.shared->msr); | |
414 | break; | |
415 | case INT_CLASS_DBG: | |
416 | set_guest_dsrr(vcpu, vcpu->arch.pc, | |
417 | vcpu->arch.shared->msr); | |
418 | break; | |
419 | case INT_CLASS_MC: | |
420 | set_guest_mcsrr(vcpu, vcpu->arch.pc, | |
421 | vcpu->arch.shared->msr); | |
422 | break; | |
423 | } | |
424 | ||
d4cf3892 | 425 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
daf5e271 | 426 | if (update_esr == true) |
d30f6e48 | 427 | set_guest_esr(vcpu, vcpu->arch.queued_esr); |
daf5e271 | 428 | if (update_dear == true) |
d30f6e48 | 429 | set_guest_dear(vcpu, vcpu->arch.queued_dear); |
1c810636 AG |
430 | if (update_epr == true) |
431 | kvm_make_request(KVM_REQ_EPR_EXIT, vcpu); | |
95e90b43 MC |
432 | |
433 | new_msr &= msr_mask; | |
434 | #if defined(CONFIG_64BIT) | |
435 | if (vcpu->arch.epcr & SPRN_EPCR_ICM) | |
436 | new_msr |= MSR_CM; | |
437 | #endif | |
438 | kvmppc_set_msr(vcpu, new_msr); | |
bbf45ba5 | 439 | |
c5335f17 AG |
440 | if (!keep_irq) |
441 | clear_bit(priority, &vcpu->arch.pending_exceptions); | |
bbf45ba5 HB |
442 | } |
443 | ||
d30f6e48 SW |
444 | #ifdef CONFIG_KVM_BOOKE_HV |
445 | /* | |
446 | * If an interrupt is pending but masked, raise a guest doorbell | |
447 | * so that we are notified when the guest enables the relevant | |
448 | * MSR bit. | |
449 | */ | |
450 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE) | |
451 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT); | |
452 | if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE) | |
453 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT); | |
454 | if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK) | |
455 | kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC); | |
456 | #endif | |
457 | ||
d4cf3892 | 458 | return allowed; |
bbf45ba5 HB |
459 | } |
460 | ||
f61c94bb BB |
461 | /* |
462 | * Return the number of jiffies until the next timeout. If the timeout is | |
463 | * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA | |
464 | * because the larger value can break the timer APIs. | |
465 | */ | |
466 | static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu) | |
467 | { | |
468 | u64 tb, wdt_tb, wdt_ticks = 0; | |
469 | u64 nr_jiffies = 0; | |
470 | u32 period = TCR_GET_WP(vcpu->arch.tcr); | |
471 | ||
472 | wdt_tb = 1ULL << (63 - period); | |
473 | tb = get_tb(); | |
474 | /* | |
475 | * The watchdog timeout will hapeen when TB bit corresponding | |
476 | * to watchdog will toggle from 0 to 1. | |
477 | */ | |
478 | if (tb & wdt_tb) | |
479 | wdt_ticks = wdt_tb; | |
480 | ||
481 | wdt_ticks += wdt_tb - (tb & (wdt_tb - 1)); | |
482 | ||
483 | /* Convert timebase ticks to jiffies */ | |
484 | nr_jiffies = wdt_ticks; | |
485 | ||
486 | if (do_div(nr_jiffies, tb_ticks_per_jiffy)) | |
487 | nr_jiffies++; | |
488 | ||
489 | return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA); | |
490 | } | |
491 | ||
492 | static void arm_next_watchdog(struct kvm_vcpu *vcpu) | |
493 | { | |
494 | unsigned long nr_jiffies; | |
495 | unsigned long flags; | |
496 | ||
497 | /* | |
498 | * If TSR_ENW and TSR_WIS are not set then no need to exit to | |
499 | * userspace, so clear the KVM_REQ_WATCHDOG request. | |
500 | */ | |
501 | if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS)) | |
502 | clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests); | |
503 | ||
504 | spin_lock_irqsave(&vcpu->arch.wdt_lock, flags); | |
505 | nr_jiffies = watchdog_next_timeout(vcpu); | |
506 | /* | |
507 | * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA | |
508 | * then do not run the watchdog timer as this can break timer APIs. | |
509 | */ | |
510 | if (nr_jiffies < NEXT_TIMER_MAX_DELTA) | |
511 | mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies); | |
512 | else | |
513 | del_timer(&vcpu->arch.wdt_timer); | |
514 | spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags); | |
515 | } | |
516 | ||
517 | void kvmppc_watchdog_func(unsigned long data) | |
518 | { | |
519 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | |
520 | u32 tsr, new_tsr; | |
521 | int final; | |
522 | ||
523 | do { | |
524 | new_tsr = tsr = vcpu->arch.tsr; | |
525 | final = 0; | |
526 | ||
527 | /* Time out event */ | |
528 | if (tsr & TSR_ENW) { | |
529 | if (tsr & TSR_WIS) | |
530 | final = 1; | |
531 | else | |
532 | new_tsr = tsr | TSR_WIS; | |
533 | } else { | |
534 | new_tsr = tsr | TSR_ENW; | |
535 | } | |
536 | } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr); | |
537 | ||
538 | if (new_tsr & TSR_WIS) { | |
539 | smp_wmb(); | |
540 | kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); | |
541 | kvm_vcpu_kick(vcpu); | |
542 | } | |
543 | ||
544 | /* | |
545 | * If this is final watchdog expiry and some action is required | |
546 | * then exit to userspace. | |
547 | */ | |
548 | if (final && (vcpu->arch.tcr & TCR_WRC_MASK) && | |
549 | vcpu->arch.watchdog_enabled) { | |
550 | smp_wmb(); | |
551 | kvm_make_request(KVM_REQ_WATCHDOG, vcpu); | |
552 | kvm_vcpu_kick(vcpu); | |
553 | } | |
554 | ||
555 | /* | |
556 | * Stop running the watchdog timer after final expiration to | |
557 | * prevent the host from being flooded with timers if the | |
558 | * guest sets a short period. | |
559 | * Timers will resume when TSR/TCR is updated next time. | |
560 | */ | |
561 | if (!final) | |
562 | arm_next_watchdog(vcpu); | |
563 | } | |
564 | ||
dfd4d47e SW |
565 | static void update_timer_ints(struct kvm_vcpu *vcpu) |
566 | { | |
567 | if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS)) | |
568 | kvmppc_core_queue_dec(vcpu); | |
569 | else | |
570 | kvmppc_core_dequeue_dec(vcpu); | |
f61c94bb BB |
571 | |
572 | if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS)) | |
573 | kvmppc_core_queue_watchdog(vcpu); | |
574 | else | |
575 | kvmppc_core_dequeue_watchdog(vcpu); | |
dfd4d47e SW |
576 | } |
577 | ||
c59a6a3e | 578 | static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu) |
bbf45ba5 HB |
579 | { |
580 | unsigned long *pending = &vcpu->arch.pending_exceptions; | |
bbf45ba5 HB |
581 | unsigned int priority; |
582 | ||
9ab80843 | 583 | priority = __ffs(*pending); |
8b3a00fc | 584 | while (priority < BOOKE_IRQPRIO_MAX) { |
d4cf3892 | 585 | if (kvmppc_booke_irqprio_deliver(vcpu, priority)) |
bbf45ba5 | 586 | break; |
bbf45ba5 HB |
587 | |
588 | priority = find_next_bit(pending, | |
589 | BITS_PER_BYTE * sizeof(*pending), | |
590 | priority + 1); | |
591 | } | |
90bba358 AG |
592 | |
593 | /* Tell the guest about our interrupt status */ | |
29ac26ef | 594 | vcpu->arch.shared->int_pending = !!*pending; |
bbf45ba5 HB |
595 | } |
596 | ||
c59a6a3e | 597 | /* Check pending exceptions and deliver one, if possible. */ |
a8e4ef84 | 598 | int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) |
c59a6a3e | 599 | { |
a8e4ef84 | 600 | int r = 0; |
c59a6a3e SW |
601 | WARN_ON_ONCE(!irqs_disabled()); |
602 | ||
603 | kvmppc_core_check_exceptions(vcpu); | |
604 | ||
b8c649a9 AG |
605 | if (vcpu->requests) { |
606 | /* Exception delivery raised request; start over */ | |
607 | return 1; | |
608 | } | |
609 | ||
c59a6a3e SW |
610 | if (vcpu->arch.shared->msr & MSR_WE) { |
611 | local_irq_enable(); | |
612 | kvm_vcpu_block(vcpu); | |
966cd0f3 | 613 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
c59a6a3e SW |
614 | local_irq_disable(); |
615 | ||
616 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | |
a8e4ef84 | 617 | r = 1; |
c59a6a3e | 618 | }; |
a8e4ef84 AG |
619 | |
620 | return r; | |
621 | } | |
622 | ||
7c973a2e | 623 | int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) |
4ffc6356 | 624 | { |
7c973a2e AG |
625 | int r = 1; /* Indicate we want to get back into the guest */ |
626 | ||
2d8185d4 AG |
627 | if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) |
628 | update_timer_ints(vcpu); | |
862d31f7 | 629 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
2d8185d4 AG |
630 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) |
631 | kvmppc_core_flush_tlb(vcpu); | |
862d31f7 | 632 | #endif |
7c973a2e | 633 | |
f61c94bb BB |
634 | if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) { |
635 | vcpu->run->exit_reason = KVM_EXIT_WATCHDOG; | |
636 | r = 0; | |
637 | } | |
638 | ||
1c810636 AG |
639 | if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) { |
640 | vcpu->run->epr.epr = 0; | |
641 | vcpu->arch.epr_needed = true; | |
642 | vcpu->run->exit_reason = KVM_EXIT_EPR; | |
643 | r = 0; | |
644 | } | |
645 | ||
7c973a2e | 646 | return r; |
4ffc6356 AG |
647 | } |
648 | ||
df6909e5 PM |
649 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
650 | { | |
7ee78855 | 651 | int ret, s; |
8fae845f SW |
652 | #ifdef CONFIG_PPC_FPU |
653 | unsigned int fpscr; | |
654 | int fpexc_mode; | |
655 | u64 fpr[32]; | |
656 | #endif | |
df6909e5 | 657 | |
af8f38b3 AG |
658 | if (!vcpu->arch.sane) { |
659 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
660 | return -EINVAL; | |
661 | } | |
662 | ||
df6909e5 | 663 | local_irq_disable(); |
7ee78855 AG |
664 | s = kvmppc_prepare_to_enter(vcpu); |
665 | if (s <= 0) { | |
24afa37b | 666 | local_irq_enable(); |
7ee78855 | 667 | ret = s; |
1d1ef222 SW |
668 | goto out; |
669 | } | |
bd2be683 | 670 | kvmppc_lazy_ee_enable(); |
1d1ef222 | 671 | |
df6909e5 | 672 | kvm_guest_enter(); |
8fae845f SW |
673 | |
674 | #ifdef CONFIG_PPC_FPU | |
675 | /* Save userspace FPU state in stack */ | |
676 | enable_kernel_fp(); | |
677 | memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr)); | |
678 | fpscr = current->thread.fpscr.val; | |
679 | fpexc_mode = current->thread.fpexc_mode; | |
680 | ||
681 | /* Restore guest FPU state to thread */ | |
682 | memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr)); | |
683 | current->thread.fpscr.val = vcpu->arch.fpscr; | |
684 | ||
685 | /* | |
686 | * Since we can't trap on MSR_FP in GS-mode, we consider the guest | |
687 | * as always using the FPU. Kernel usage of FP (via | |
688 | * enable_kernel_fp()) in this thread must not occur while | |
689 | * vcpu->fpu_active is set. | |
690 | */ | |
691 | vcpu->fpu_active = 1; | |
692 | ||
693 | kvmppc_load_guest_fp(vcpu); | |
694 | #endif | |
695 | ||
df6909e5 | 696 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
8fae845f | 697 | |
24afa37b AG |
698 | /* No need for kvm_guest_exit. It's done in handle_exit. |
699 | We also get here with interrupts enabled. */ | |
700 | ||
8fae845f SW |
701 | #ifdef CONFIG_PPC_FPU |
702 | kvmppc_save_guest_fp(vcpu); | |
703 | ||
704 | vcpu->fpu_active = 0; | |
705 | ||
706 | /* Save guest FPU state from thread */ | |
707 | memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr)); | |
708 | vcpu->arch.fpscr = current->thread.fpscr.val; | |
709 | ||
710 | /* Restore userspace FPU state from stack */ | |
711 | memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr)); | |
712 | current->thread.fpscr.val = fpscr; | |
713 | current->thread.fpexc_mode = fpexc_mode; | |
714 | #endif | |
715 | ||
1d1ef222 | 716 | out: |
d69c6436 | 717 | vcpu->mode = OUTSIDE_GUEST_MODE; |
df6909e5 PM |
718 | return ret; |
719 | } | |
720 | ||
d30f6e48 SW |
721 | static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) |
722 | { | |
723 | enum emulation_result er; | |
724 | ||
725 | er = kvmppc_emulate_instruction(run, vcpu); | |
726 | switch (er) { | |
727 | case EMULATE_DONE: | |
728 | /* don't overwrite subtypes, just account kvm_stats */ | |
729 | kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS); | |
730 | /* Future optimization: only reload non-volatiles if | |
731 | * they were actually modified by emulation. */ | |
732 | return RESUME_GUEST_NV; | |
733 | ||
734 | case EMULATE_DO_DCR: | |
735 | run->exit_reason = KVM_EXIT_DCR; | |
736 | return RESUME_HOST; | |
737 | ||
738 | case EMULATE_FAIL: | |
d30f6e48 SW |
739 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", |
740 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | |
741 | /* For debugging, encode the failing instruction and | |
742 | * report it to userspace. */ | |
743 | run->hw.hardware_exit_reason = ~0ULL << 32; | |
744 | run->hw.hardware_exit_reason |= vcpu->arch.last_inst; | |
d1ff5499 | 745 | kvmppc_core_queue_program(vcpu, ESR_PIL); |
d30f6e48 SW |
746 | return RESUME_HOST; |
747 | ||
748 | default: | |
749 | BUG(); | |
750 | } | |
751 | } | |
752 | ||
4e642ccb | 753 | static void kvmppc_fill_pt_regs(struct pt_regs *regs) |
bbf45ba5 | 754 | { |
4e642ccb | 755 | ulong r1, ip, msr, lr; |
bbf45ba5 | 756 | |
4e642ccb AG |
757 | asm("mr %0, 1" : "=r"(r1)); |
758 | asm("mflr %0" : "=r"(lr)); | |
759 | asm("mfmsr %0" : "=r"(msr)); | |
760 | asm("bl 1f; 1: mflr %0" : "=r"(ip)); | |
761 | ||
762 | memset(regs, 0, sizeof(*regs)); | |
763 | regs->gpr[1] = r1; | |
764 | regs->nip = ip; | |
765 | regs->msr = msr; | |
766 | regs->link = lr; | |
767 | } | |
768 | ||
6328e593 BB |
769 | /* |
770 | * For interrupts needed to be handled by host interrupt handlers, | |
771 | * corresponding host handler are called from here in similar way | |
772 | * (but not exact) as they are called from low level handler | |
773 | * (such as from arch/powerpc/kernel/head_fsl_booke.S). | |
774 | */ | |
4e642ccb AG |
775 | static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu, |
776 | unsigned int exit_nr) | |
777 | { | |
778 | struct pt_regs regs; | |
73e75b41 | 779 | |
d30f6e48 SW |
780 | switch (exit_nr) { |
781 | case BOOKE_INTERRUPT_EXTERNAL: | |
4e642ccb AG |
782 | kvmppc_fill_pt_regs(®s); |
783 | do_IRQ(®s); | |
d30f6e48 | 784 | break; |
d30f6e48 | 785 | case BOOKE_INTERRUPT_DECREMENTER: |
4e642ccb AG |
786 | kvmppc_fill_pt_regs(®s); |
787 | timer_interrupt(®s); | |
d30f6e48 | 788 | break; |
d30f6e48 SW |
789 | #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64) |
790 | case BOOKE_INTERRUPT_DOORBELL: | |
4e642ccb AG |
791 | kvmppc_fill_pt_regs(®s); |
792 | doorbell_exception(®s); | |
d30f6e48 SW |
793 | break; |
794 | #endif | |
795 | case BOOKE_INTERRUPT_MACHINE_CHECK: | |
796 | /* FIXME */ | |
797 | break; | |
7cc1e8ee AG |
798 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: |
799 | kvmppc_fill_pt_regs(®s); | |
800 | performance_monitor_exception(®s); | |
801 | break; | |
6328e593 BB |
802 | case BOOKE_INTERRUPT_WATCHDOG: |
803 | kvmppc_fill_pt_regs(®s); | |
804 | #ifdef CONFIG_BOOKE_WDT | |
805 | WatchdogException(®s); | |
806 | #else | |
807 | unknown_exception(®s); | |
808 | #endif | |
809 | break; | |
810 | case BOOKE_INTERRUPT_CRITICAL: | |
811 | unknown_exception(®s); | |
812 | break; | |
d30f6e48 | 813 | } |
4e642ccb AG |
814 | } |
815 | ||
816 | /** | |
817 | * kvmppc_handle_exit | |
818 | * | |
819 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) | |
820 | */ | |
821 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
822 | unsigned int exit_nr) | |
823 | { | |
824 | int r = RESUME_HOST; | |
7ee78855 | 825 | int s; |
4e642ccb AG |
826 | |
827 | /* update before a new last_exit_type is rewritten */ | |
828 | kvmppc_update_timing_stats(vcpu); | |
829 | ||
830 | /* restart interrupts if they were meant for the host */ | |
831 | kvmppc_restart_interrupt(vcpu, exit_nr); | |
d30f6e48 | 832 | |
bbf45ba5 HB |
833 | local_irq_enable(); |
834 | ||
97c95059 | 835 | trace_kvm_exit(exit_nr, vcpu); |
706fb730 | 836 | kvm_guest_exit(); |
97c95059 | 837 | |
bbf45ba5 HB |
838 | run->exit_reason = KVM_EXIT_UNKNOWN; |
839 | run->ready_for_interrupt_injection = 1; | |
840 | ||
841 | switch (exit_nr) { | |
842 | case BOOKE_INTERRUPT_MACHINE_CHECK: | |
c35c9d84 AG |
843 | printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); |
844 | kvmppc_dump_vcpu(vcpu); | |
845 | /* For debugging, send invalid exit reason to user space */ | |
846 | run->hw.hardware_exit_reason = ~1ULL << 32; | |
847 | run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR); | |
848 | r = RESUME_HOST; | |
bbf45ba5 HB |
849 | break; |
850 | ||
851 | case BOOKE_INTERRUPT_EXTERNAL: | |
7b701591 | 852 | kvmppc_account_exit(vcpu, EXT_INTR_EXITS); |
1b6766c7 HB |
853 | r = RESUME_GUEST; |
854 | break; | |
855 | ||
bbf45ba5 | 856 | case BOOKE_INTERRUPT_DECREMENTER: |
7b701591 | 857 | kvmppc_account_exit(vcpu, DEC_EXITS); |
bbf45ba5 HB |
858 | r = RESUME_GUEST; |
859 | break; | |
860 | ||
6328e593 BB |
861 | case BOOKE_INTERRUPT_WATCHDOG: |
862 | r = RESUME_GUEST; | |
863 | break; | |
864 | ||
d30f6e48 SW |
865 | case BOOKE_INTERRUPT_DOORBELL: |
866 | kvmppc_account_exit(vcpu, DBELL_EXITS); | |
d30f6e48 SW |
867 | r = RESUME_GUEST; |
868 | break; | |
869 | ||
870 | case BOOKE_INTERRUPT_GUEST_DBELL_CRIT: | |
871 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | |
872 | ||
873 | /* | |
874 | * We are here because there is a pending guest interrupt | |
875 | * which could not be delivered as MSR_CE or MSR_ME was not | |
876 | * set. Once we break from here we will retry delivery. | |
877 | */ | |
878 | r = RESUME_GUEST; | |
879 | break; | |
880 | ||
881 | case BOOKE_INTERRUPT_GUEST_DBELL: | |
882 | kvmppc_account_exit(vcpu, GDBELL_EXITS); | |
883 | ||
884 | /* | |
885 | * We are here because there is a pending guest interrupt | |
886 | * which could not be delivered as MSR_EE was not set. Once | |
887 | * we break from here we will retry delivery. | |
888 | */ | |
889 | r = RESUME_GUEST; | |
890 | break; | |
891 | ||
95f2e921 AG |
892 | case BOOKE_INTERRUPT_PERFORMANCE_MONITOR: |
893 | r = RESUME_GUEST; | |
894 | break; | |
895 | ||
d30f6e48 SW |
896 | case BOOKE_INTERRUPT_HV_PRIV: |
897 | r = emulation_exit(run, vcpu); | |
898 | break; | |
899 | ||
bbf45ba5 | 900 | case BOOKE_INTERRUPT_PROGRAM: |
d30f6e48 | 901 | if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) { |
0268597c AG |
902 | /* |
903 | * Program traps generated by user-level software must | |
904 | * be handled by the guest kernel. | |
905 | * | |
906 | * In GS mode, hypervisor privileged instructions trap | |
907 | * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are | |
908 | * actual program interrupts, handled by the guest. | |
909 | */ | |
daf5e271 | 910 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
bbf45ba5 | 911 | r = RESUME_GUEST; |
7b701591 | 912 | kvmppc_account_exit(vcpu, USR_PR_INST); |
bbf45ba5 HB |
913 | break; |
914 | } | |
915 | ||
d30f6e48 | 916 | r = emulation_exit(run, vcpu); |
bbf45ba5 HB |
917 | break; |
918 | ||
de368dce | 919 | case BOOKE_INTERRUPT_FP_UNAVAIL: |
d4cf3892 | 920 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL); |
7b701591 | 921 | kvmppc_account_exit(vcpu, FP_UNAVAIL); |
de368dce CE |
922 | r = RESUME_GUEST; |
923 | break; | |
924 | ||
4cd35f67 SW |
925 | #ifdef CONFIG_SPE |
926 | case BOOKE_INTERRUPT_SPE_UNAVAIL: { | |
927 | if (vcpu->arch.shared->msr & MSR_SPE) | |
928 | kvmppc_vcpu_enable_spe(vcpu); | |
929 | else | |
930 | kvmppc_booke_queue_irqprio(vcpu, | |
931 | BOOKE_IRQPRIO_SPE_UNAVAIL); | |
bb3a8a17 HB |
932 | r = RESUME_GUEST; |
933 | break; | |
4cd35f67 | 934 | } |
bb3a8a17 HB |
935 | |
936 | case BOOKE_INTERRUPT_SPE_FP_DATA: | |
937 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA); | |
938 | r = RESUME_GUEST; | |
939 | break; | |
940 | ||
941 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | |
942 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND); | |
943 | r = RESUME_GUEST; | |
944 | break; | |
4cd35f67 SW |
945 | #else |
946 | case BOOKE_INTERRUPT_SPE_UNAVAIL: | |
947 | /* | |
948 | * Guest wants SPE, but host kernel doesn't support it. Send | |
949 | * an "unimplemented operation" program check to the guest. | |
950 | */ | |
951 | kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV); | |
952 | r = RESUME_GUEST; | |
953 | break; | |
954 | ||
955 | /* | |
956 | * These really should never happen without CONFIG_SPE, | |
957 | * as we should never enable the real MSR[SPE] in the guest. | |
958 | */ | |
959 | case BOOKE_INTERRUPT_SPE_FP_DATA: | |
960 | case BOOKE_INTERRUPT_SPE_FP_ROUND: | |
961 | printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n", | |
962 | __func__, exit_nr, vcpu->arch.pc); | |
963 | run->hw.hardware_exit_reason = exit_nr; | |
964 | r = RESUME_HOST; | |
965 | break; | |
966 | #endif | |
bb3a8a17 | 967 | |
bbf45ba5 | 968 | case BOOKE_INTERRUPT_DATA_STORAGE: |
daf5e271 LY |
969 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, |
970 | vcpu->arch.fault_esr); | |
7b701591 | 971 | kvmppc_account_exit(vcpu, DSI_EXITS); |
bbf45ba5 HB |
972 | r = RESUME_GUEST; |
973 | break; | |
974 | ||
975 | case BOOKE_INTERRUPT_INST_STORAGE: | |
daf5e271 | 976 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); |
7b701591 | 977 | kvmppc_account_exit(vcpu, ISI_EXITS); |
bbf45ba5 HB |
978 | r = RESUME_GUEST; |
979 | break; | |
980 | ||
011da899 AG |
981 | case BOOKE_INTERRUPT_ALIGNMENT: |
982 | kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear, | |
983 | vcpu->arch.fault_esr); | |
984 | r = RESUME_GUEST; | |
985 | break; | |
986 | ||
d30f6e48 SW |
987 | #ifdef CONFIG_KVM_BOOKE_HV |
988 | case BOOKE_INTERRUPT_HV_SYSCALL: | |
989 | if (!(vcpu->arch.shared->msr & MSR_PR)) { | |
990 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | |
991 | } else { | |
992 | /* | |
993 | * hcall from guest userspace -- send privileged | |
994 | * instruction program check. | |
995 | */ | |
996 | kvmppc_core_queue_program(vcpu, ESR_PPR); | |
997 | } | |
998 | ||
999 | r = RESUME_GUEST; | |
1000 | break; | |
1001 | #else | |
bbf45ba5 | 1002 | case BOOKE_INTERRUPT_SYSCALL: |
2a342ed5 AG |
1003 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
1004 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | |
1005 | /* KVM PV hypercalls */ | |
1006 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | |
1007 | r = RESUME_GUEST; | |
1008 | } else { | |
1009 | /* Guest syscalls */ | |
1010 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | |
1011 | } | |
7b701591 | 1012 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
bbf45ba5 HB |
1013 | r = RESUME_GUEST; |
1014 | break; | |
d30f6e48 | 1015 | #endif |
bbf45ba5 HB |
1016 | |
1017 | case BOOKE_INTERRUPT_DTLB_MISS: { | |
bbf45ba5 | 1018 | unsigned long eaddr = vcpu->arch.fault_dear; |
7924bd41 | 1019 | int gtlb_index; |
475e7cdd | 1020 | gpa_t gpaddr; |
bbf45ba5 HB |
1021 | gfn_t gfn; |
1022 | ||
bf7ca4bd | 1023 | #ifdef CONFIG_KVM_E500V2 |
a4cd8b23 SW |
1024 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
1025 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | |
1026 | kvmppc_map_magic(vcpu); | |
1027 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); | |
1028 | r = RESUME_GUEST; | |
1029 | ||
1030 | break; | |
1031 | } | |
1032 | #endif | |
1033 | ||
bbf45ba5 | 1034 | /* Check the guest TLB. */ |
fa86b8dd | 1035 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); |
7924bd41 | 1036 | if (gtlb_index < 0) { |
bbf45ba5 | 1037 | /* The guest didn't have a mapping for it. */ |
daf5e271 LY |
1038 | kvmppc_core_queue_dtlb_miss(vcpu, |
1039 | vcpu->arch.fault_dear, | |
1040 | vcpu->arch.fault_esr); | |
b52a638c | 1041 | kvmppc_mmu_dtlb_miss(vcpu); |
7b701591 | 1042 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
bbf45ba5 HB |
1043 | r = RESUME_GUEST; |
1044 | break; | |
1045 | } | |
1046 | ||
be8d1cae | 1047 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
475e7cdd | 1048 | gfn = gpaddr >> PAGE_SHIFT; |
bbf45ba5 HB |
1049 | |
1050 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | |
1051 | /* The guest TLB had a mapping, but the shadow TLB | |
1052 | * didn't, and it is RAM. This could be because: | |
1053 | * a) the entry is mapping the host kernel, or | |
1054 | * b) the guest used a large mapping which we're faking | |
1055 | * Either way, we need to satisfy the fault without | |
1056 | * invoking the guest. */ | |
58a96214 | 1057 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
7b701591 | 1058 | kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS); |
bbf45ba5 HB |
1059 | r = RESUME_GUEST; |
1060 | } else { | |
1061 | /* Guest has mapped and accessed a page which is not | |
1062 | * actually RAM. */ | |
475e7cdd | 1063 | vcpu->arch.paddr_accessed = gpaddr; |
6020c0f6 | 1064 | vcpu->arch.vaddr_accessed = eaddr; |
bbf45ba5 | 1065 | r = kvmppc_emulate_mmio(run, vcpu); |
7b701591 | 1066 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
bbf45ba5 HB |
1067 | } |
1068 | ||
1069 | break; | |
1070 | } | |
1071 | ||
1072 | case BOOKE_INTERRUPT_ITLB_MISS: { | |
bbf45ba5 | 1073 | unsigned long eaddr = vcpu->arch.pc; |
89168618 | 1074 | gpa_t gpaddr; |
bbf45ba5 | 1075 | gfn_t gfn; |
7924bd41 | 1076 | int gtlb_index; |
bbf45ba5 HB |
1077 | |
1078 | r = RESUME_GUEST; | |
1079 | ||
1080 | /* Check the guest TLB. */ | |
fa86b8dd | 1081 | gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); |
7924bd41 | 1082 | if (gtlb_index < 0) { |
bbf45ba5 | 1083 | /* The guest didn't have a mapping for it. */ |
d4cf3892 | 1084 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS); |
b52a638c | 1085 | kvmppc_mmu_itlb_miss(vcpu); |
7b701591 | 1086 | kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS); |
bbf45ba5 HB |
1087 | break; |
1088 | } | |
1089 | ||
7b701591 | 1090 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
bbf45ba5 | 1091 | |
be8d1cae | 1092 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
89168618 | 1093 | gfn = gpaddr >> PAGE_SHIFT; |
bbf45ba5 HB |
1094 | |
1095 | if (kvm_is_visible_gfn(vcpu->kvm, gfn)) { | |
1096 | /* The guest TLB had a mapping, but the shadow TLB | |
1097 | * didn't. This could be because: | |
1098 | * a) the entry is mapping the host kernel, or | |
1099 | * b) the guest used a large mapping which we're faking | |
1100 | * Either way, we need to satisfy the fault without | |
1101 | * invoking the guest. */ | |
58a96214 | 1102 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
bbf45ba5 HB |
1103 | } else { |
1104 | /* Guest mapped and leaped at non-RAM! */ | |
d4cf3892 | 1105 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); |
bbf45ba5 HB |
1106 | } |
1107 | ||
1108 | break; | |
1109 | } | |
1110 | ||
6a0ab738 HB |
1111 | case BOOKE_INTERRUPT_DEBUG: { |
1112 | u32 dbsr; | |
1113 | ||
1114 | vcpu->arch.pc = mfspr(SPRN_CSRR0); | |
1115 | ||
1116 | /* clear IAC events in DBSR register */ | |
1117 | dbsr = mfspr(SPRN_DBSR); | |
1118 | dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4; | |
1119 | mtspr(SPRN_DBSR, dbsr); | |
1120 | ||
1121 | run->exit_reason = KVM_EXIT_DEBUG; | |
7b701591 | 1122 | kvmppc_account_exit(vcpu, DEBUG_EXITS); |
6a0ab738 HB |
1123 | r = RESUME_HOST; |
1124 | break; | |
1125 | } | |
1126 | ||
bbf45ba5 HB |
1127 | default: |
1128 | printk(KERN_EMERG "exit_nr %d\n", exit_nr); | |
1129 | BUG(); | |
1130 | } | |
1131 | ||
a8e4ef84 AG |
1132 | /* |
1133 | * To avoid clobbering exit_reason, only check for signals if we | |
1134 | * aren't already exiting to userspace for some other reason. | |
1135 | */ | |
03660ba2 AG |
1136 | if (!(r & RESUME_HOST)) { |
1137 | local_irq_disable(); | |
7ee78855 AG |
1138 | s = kvmppc_prepare_to_enter(vcpu); |
1139 | if (s <= 0) { | |
24afa37b | 1140 | local_irq_enable(); |
7ee78855 | 1141 | r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); |
24afa37b | 1142 | } else { |
bd2be683 | 1143 | kvmppc_lazy_ee_enable(); |
03660ba2 | 1144 | } |
bbf45ba5 HB |
1145 | } |
1146 | ||
1147 | return r; | |
1148 | } | |
1149 | ||
d26f22c9 BB |
1150 | static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr) |
1151 | { | |
1152 | u32 old_tsr = vcpu->arch.tsr; | |
1153 | ||
1154 | vcpu->arch.tsr = new_tsr; | |
1155 | ||
1156 | if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS)) | |
1157 | arm_next_watchdog(vcpu); | |
1158 | ||
1159 | update_timer_ints(vcpu); | |
1160 | } | |
1161 | ||
bbf45ba5 HB |
1162 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
1163 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |
1164 | { | |
082decf2 | 1165 | int i; |
af8f38b3 | 1166 | int r; |
082decf2 | 1167 | |
bbf45ba5 | 1168 | vcpu->arch.pc = 0; |
b5904972 | 1169 | vcpu->arch.shared->pir = vcpu->vcpu_id; |
8e5b26b5 | 1170 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
d30f6e48 | 1171 | kvmppc_set_msr(vcpu, 0); |
bbf45ba5 | 1172 | |
d30f6e48 SW |
1173 | #ifndef CONFIG_KVM_BOOKE_HV |
1174 | vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; | |
49dd2c49 | 1175 | vcpu->arch.shadow_pid = 1; |
d30f6e48 SW |
1176 | vcpu->arch.shared->msr = 0; |
1177 | #endif | |
49dd2c49 | 1178 | |
082decf2 HB |
1179 | /* Eye-catching numbers so we know if the guest takes an interrupt |
1180 | * before it's programmed its own IVPR/IVORs. */ | |
bbf45ba5 | 1181 | vcpu->arch.ivpr = 0x55550000; |
082decf2 HB |
1182 | for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) |
1183 | vcpu->arch.ivor[i] = 0x7700 | i * 4; | |
bbf45ba5 | 1184 | |
73e75b41 HB |
1185 | kvmppc_init_timing_stats(vcpu); |
1186 | ||
af8f38b3 AG |
1187 | r = kvmppc_core_vcpu_setup(vcpu); |
1188 | kvmppc_sanity_check(vcpu); | |
1189 | return r; | |
bbf45ba5 HB |
1190 | } |
1191 | ||
f61c94bb BB |
1192 | int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu) |
1193 | { | |
1194 | /* setup watchdog timer once */ | |
1195 | spin_lock_init(&vcpu->arch.wdt_lock); | |
1196 | setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, | |
1197 | (unsigned long)vcpu); | |
1198 | ||
1199 | return 0; | |
1200 | } | |
1201 | ||
1202 | void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu) | |
1203 | { | |
1204 | del_timer_sync(&vcpu->arch.wdt_timer); | |
1205 | } | |
1206 | ||
bbf45ba5 HB |
1207 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
1208 | { | |
1209 | int i; | |
1210 | ||
1211 | regs->pc = vcpu->arch.pc; | |
992b5b29 | 1212 | regs->cr = kvmppc_get_cr(vcpu); |
bbf45ba5 HB |
1213 | regs->ctr = vcpu->arch.ctr; |
1214 | regs->lr = vcpu->arch.lr; | |
992b5b29 | 1215 | regs->xer = kvmppc_get_xer(vcpu); |
666e7252 | 1216 | regs->msr = vcpu->arch.shared->msr; |
de7906c3 AG |
1217 | regs->srr0 = vcpu->arch.shared->srr0; |
1218 | regs->srr1 = vcpu->arch.shared->srr1; | |
bbf45ba5 | 1219 | regs->pid = vcpu->arch.pid; |
a73a9599 AG |
1220 | regs->sprg0 = vcpu->arch.shared->sprg0; |
1221 | regs->sprg1 = vcpu->arch.shared->sprg1; | |
1222 | regs->sprg2 = vcpu->arch.shared->sprg2; | |
1223 | regs->sprg3 = vcpu->arch.shared->sprg3; | |
b5904972 SW |
1224 | regs->sprg4 = vcpu->arch.shared->sprg4; |
1225 | regs->sprg5 = vcpu->arch.shared->sprg5; | |
1226 | regs->sprg6 = vcpu->arch.shared->sprg6; | |
1227 | regs->sprg7 = vcpu->arch.shared->sprg7; | |
bbf45ba5 HB |
1228 | |
1229 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | |
8e5b26b5 | 1230 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
bbf45ba5 HB |
1231 | |
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |
1236 | { | |
1237 | int i; | |
1238 | ||
1239 | vcpu->arch.pc = regs->pc; | |
992b5b29 | 1240 | kvmppc_set_cr(vcpu, regs->cr); |
bbf45ba5 HB |
1241 | vcpu->arch.ctr = regs->ctr; |
1242 | vcpu->arch.lr = regs->lr; | |
992b5b29 | 1243 | kvmppc_set_xer(vcpu, regs->xer); |
b8fd68ac | 1244 | kvmppc_set_msr(vcpu, regs->msr); |
de7906c3 AG |
1245 | vcpu->arch.shared->srr0 = regs->srr0; |
1246 | vcpu->arch.shared->srr1 = regs->srr1; | |
5ce941ee | 1247 | kvmppc_set_pid(vcpu, regs->pid); |
a73a9599 AG |
1248 | vcpu->arch.shared->sprg0 = regs->sprg0; |
1249 | vcpu->arch.shared->sprg1 = regs->sprg1; | |
1250 | vcpu->arch.shared->sprg2 = regs->sprg2; | |
1251 | vcpu->arch.shared->sprg3 = regs->sprg3; | |
b5904972 SW |
1252 | vcpu->arch.shared->sprg4 = regs->sprg4; |
1253 | vcpu->arch.shared->sprg5 = regs->sprg5; | |
1254 | vcpu->arch.shared->sprg6 = regs->sprg6; | |
1255 | vcpu->arch.shared->sprg7 = regs->sprg7; | |
bbf45ba5 | 1256 | |
8e5b26b5 AG |
1257 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
1258 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | |
bbf45ba5 HB |
1259 | |
1260 | return 0; | |
1261 | } | |
1262 | ||
5ce941ee SW |
1263 | static void get_sregs_base(struct kvm_vcpu *vcpu, |
1264 | struct kvm_sregs *sregs) | |
1265 | { | |
1266 | u64 tb = get_tb(); | |
1267 | ||
1268 | sregs->u.e.features |= KVM_SREGS_E_BASE; | |
1269 | ||
1270 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | |
1271 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | |
1272 | sregs->u.e.mcsr = vcpu->arch.mcsr; | |
d30f6e48 SW |
1273 | sregs->u.e.esr = get_guest_esr(vcpu); |
1274 | sregs->u.e.dear = get_guest_dear(vcpu); | |
5ce941ee SW |
1275 | sregs->u.e.tsr = vcpu->arch.tsr; |
1276 | sregs->u.e.tcr = vcpu->arch.tcr; | |
1277 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | |
1278 | sregs->u.e.tb = tb; | |
1279 | sregs->u.e.vrsave = vcpu->arch.vrsave; | |
1280 | } | |
1281 | ||
1282 | static int set_sregs_base(struct kvm_vcpu *vcpu, | |
1283 | struct kvm_sregs *sregs) | |
1284 | { | |
1285 | if (!(sregs->u.e.features & KVM_SREGS_E_BASE)) | |
1286 | return 0; | |
1287 | ||
1288 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | |
1289 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | |
1290 | vcpu->arch.mcsr = sregs->u.e.mcsr; | |
d30f6e48 SW |
1291 | set_guest_esr(vcpu, sregs->u.e.esr); |
1292 | set_guest_dear(vcpu, sregs->u.e.dear); | |
5ce941ee | 1293 | vcpu->arch.vrsave = sregs->u.e.vrsave; |
dfd4d47e | 1294 | kvmppc_set_tcr(vcpu, sregs->u.e.tcr); |
5ce941ee | 1295 | |
dfd4d47e | 1296 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) { |
5ce941ee | 1297 | vcpu->arch.dec = sregs->u.e.dec; |
dfd4d47e SW |
1298 | kvmppc_emulate_dec(vcpu); |
1299 | } | |
5ce941ee | 1300 | |
d26f22c9 BB |
1301 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) |
1302 | kvmppc_set_tsr(vcpu, sregs->u.e.tsr); | |
5ce941ee SW |
1303 | |
1304 | return 0; | |
1305 | } | |
1306 | ||
1307 | static void get_sregs_arch206(struct kvm_vcpu *vcpu, | |
1308 | struct kvm_sregs *sregs) | |
1309 | { | |
1310 | sregs->u.e.features |= KVM_SREGS_E_ARCH206; | |
1311 | ||
841741f2 | 1312 | sregs->u.e.pir = vcpu->vcpu_id; |
5ce941ee SW |
1313 | sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; |
1314 | sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; | |
1315 | sregs->u.e.decar = vcpu->arch.decar; | |
1316 | sregs->u.e.ivpr = vcpu->arch.ivpr; | |
1317 | } | |
1318 | ||
1319 | static int set_sregs_arch206(struct kvm_vcpu *vcpu, | |
1320 | struct kvm_sregs *sregs) | |
1321 | { | |
1322 | if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) | |
1323 | return 0; | |
1324 | ||
841741f2 | 1325 | if (sregs->u.e.pir != vcpu->vcpu_id) |
5ce941ee SW |
1326 | return -EINVAL; |
1327 | ||
1328 | vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; | |
1329 | vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; | |
1330 | vcpu->arch.decar = sregs->u.e.decar; | |
1331 | vcpu->arch.ivpr = sregs->u.e.ivpr; | |
1332 | ||
1333 | return 0; | |
1334 | } | |
1335 | ||
1336 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |
1337 | { | |
1338 | sregs->u.e.features |= KVM_SREGS_E_IVOR; | |
1339 | ||
1340 | sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | |
1341 | sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | |
1342 | sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | |
1343 | sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | |
1344 | sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | |
1345 | sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | |
1346 | sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | |
1347 | sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | |
1348 | sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | |
1349 | sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | |
1350 | sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | |
1351 | sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | |
1352 | sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | |
1353 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | |
1354 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | |
1355 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | |
1356 | } | |
1357 | ||
1358 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |
1359 | { | |
1360 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | |
1361 | return 0; | |
1362 | ||
1363 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; | |
1364 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; | |
1365 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; | |
1366 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; | |
1367 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; | |
1368 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; | |
1369 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; | |
1370 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; | |
1371 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; | |
1372 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; | |
1373 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; | |
1374 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; | |
1375 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; | |
1376 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; | |
1377 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; | |
1378 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; | |
1379 | ||
1380 | return 0; | |
1381 | } | |
1382 | ||
bbf45ba5 HB |
1383 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1384 | struct kvm_sregs *sregs) | |
1385 | { | |
5ce941ee SW |
1386 | sregs->pvr = vcpu->arch.pvr; |
1387 | ||
1388 | get_sregs_base(vcpu, sregs); | |
1389 | get_sregs_arch206(vcpu, sregs); | |
1390 | kvmppc_core_get_sregs(vcpu, sregs); | |
1391 | return 0; | |
bbf45ba5 HB |
1392 | } |
1393 | ||
1394 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |
1395 | struct kvm_sregs *sregs) | |
1396 | { | |
5ce941ee SW |
1397 | int ret; |
1398 | ||
1399 | if (vcpu->arch.pvr != sregs->pvr) | |
1400 | return -EINVAL; | |
1401 | ||
1402 | ret = set_sregs_base(vcpu, sregs); | |
1403 | if (ret < 0) | |
1404 | return ret; | |
1405 | ||
1406 | ret = set_sregs_arch206(vcpu, sregs); | |
1407 | if (ret < 0) | |
1408 | return ret; | |
1409 | ||
1410 | return kvmppc_core_set_sregs(vcpu, sregs); | |
bbf45ba5 HB |
1411 | } |
1412 | ||
31f3438e PM |
1413 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
1414 | { | |
6df8d3fc BB |
1415 | int r = -EINVAL; |
1416 | ||
1417 | switch (reg->id) { | |
1418 | case KVM_REG_PPC_IAC1: | |
1419 | case KVM_REG_PPC_IAC2: | |
1420 | case KVM_REG_PPC_IAC3: | |
1421 | case KVM_REG_PPC_IAC4: { | |
1422 | int iac = reg->id - KVM_REG_PPC_IAC1; | |
1423 | r = copy_to_user((u64 __user *)(long)reg->addr, | |
1424 | &vcpu->arch.dbg_reg.iac[iac], sizeof(u64)); | |
1425 | break; | |
1426 | } | |
1427 | case KVM_REG_PPC_DAC1: | |
1428 | case KVM_REG_PPC_DAC2: { | |
1429 | int dac = reg->id - KVM_REG_PPC_DAC1; | |
1430 | r = copy_to_user((u64 __user *)(long)reg->addr, | |
1431 | &vcpu->arch.dbg_reg.dac[dac], sizeof(u64)); | |
1432 | break; | |
1433 | } | |
324b3e63 AG |
1434 | case KVM_REG_PPC_EPR: { |
1435 | u32 epr = get_guest_epr(vcpu); | |
1436 | r = put_user(epr, (u32 __user *)(long)reg->addr); | |
1437 | break; | |
1438 | } | |
352df1de MC |
1439 | #if defined(CONFIG_64BIT) |
1440 | case KVM_REG_PPC_EPCR: | |
1441 | r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr); | |
1442 | break; | |
1443 | #endif | |
78accda4 BB |
1444 | case KVM_REG_PPC_TCR: |
1445 | r = put_user(vcpu->arch.tcr, (u32 __user *)(long)reg->addr); | |
1446 | break; | |
1447 | case KVM_REG_PPC_TSR: | |
1448 | r = put_user(vcpu->arch.tsr, (u32 __user *)(long)reg->addr); | |
1449 | break; | |
8c32a2ea BB |
1450 | case KVM_REG_PPC_DEBUG_INST: { |
1451 | u32 opcode = KVMPPC_INST_EHPRIV; | |
1452 | r = copy_to_user((u32 __user *)(long)reg->addr, | |
1453 | &opcode, sizeof(u32)); | |
1454 | break; | |
1455 | } | |
6df8d3fc BB |
1456 | default: |
1457 | break; | |
1458 | } | |
1459 | return r; | |
31f3438e PM |
1460 | } |
1461 | ||
1462 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |
1463 | { | |
6df8d3fc BB |
1464 | int r = -EINVAL; |
1465 | ||
1466 | switch (reg->id) { | |
1467 | case KVM_REG_PPC_IAC1: | |
1468 | case KVM_REG_PPC_IAC2: | |
1469 | case KVM_REG_PPC_IAC3: | |
1470 | case KVM_REG_PPC_IAC4: { | |
1471 | int iac = reg->id - KVM_REG_PPC_IAC1; | |
1472 | r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac], | |
1473 | (u64 __user *)(long)reg->addr, sizeof(u64)); | |
1474 | break; | |
1475 | } | |
1476 | case KVM_REG_PPC_DAC1: | |
1477 | case KVM_REG_PPC_DAC2: { | |
1478 | int dac = reg->id - KVM_REG_PPC_DAC1; | |
1479 | r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac], | |
1480 | (u64 __user *)(long)reg->addr, sizeof(u64)); | |
1481 | break; | |
1482 | } | |
324b3e63 AG |
1483 | case KVM_REG_PPC_EPR: { |
1484 | u32 new_epr; | |
1485 | r = get_user(new_epr, (u32 __user *)(long)reg->addr); | |
1486 | if (!r) | |
1487 | kvmppc_set_epr(vcpu, new_epr); | |
1488 | break; | |
1489 | } | |
352df1de MC |
1490 | #if defined(CONFIG_64BIT) |
1491 | case KVM_REG_PPC_EPCR: { | |
1492 | u32 new_epcr; | |
1493 | r = get_user(new_epcr, (u32 __user *)(long)reg->addr); | |
1494 | if (r == 0) | |
1495 | kvmppc_set_epcr(vcpu, new_epcr); | |
1496 | break; | |
1497 | } | |
1498 | #endif | |
78accda4 BB |
1499 | case KVM_REG_PPC_OR_TSR: { |
1500 | u32 tsr_bits; | |
1501 | r = get_user(tsr_bits, (u32 __user *)(long)reg->addr); | |
1502 | if (!r) kvmppc_set_tsr_bits(vcpu, tsr_bits); | |
1503 | break; | |
1504 | } | |
1505 | case KVM_REG_PPC_CLEAR_TSR: { | |
1506 | u32 tsr_bits; | |
1507 | r = get_user(tsr_bits, (u32 __user *)(long)reg->addr); | |
1508 | if (!r) kvmppc_clr_tsr_bits(vcpu, tsr_bits); | |
1509 | break; | |
1510 | } | |
1511 | case KVM_REG_PPC_TSR: { | |
1512 | u32 tsr; | |
1513 | r = get_user(tsr, (u32 __user *)(long)reg->addr); | |
1514 | if (!r) kvmppc_set_tsr(vcpu, tsr); | |
1515 | break; | |
1516 | } | |
1517 | case KVM_REG_PPC_TCR: { | |
1518 | u32 tcr; | |
1519 | r = get_user(tcr, (u32 __user *)(long)reg->addr); | |
1520 | if (!r) kvmppc_set_tcr(vcpu, tcr); | |
1521 | break; | |
1522 | } | |
6df8d3fc BB |
1523 | default: |
1524 | break; | |
1525 | } | |
1526 | return r; | |
31f3438e PM |
1527 | } |
1528 | ||
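Both one-reg handlers implement the ONE_REG interface: userspace fills a struct kvm_one_reg with a register ID and a pointer (carried as a u64) to a buffer of the size encoded in the ID. A sketch of programming the guest TCR this way, assuming a valid vcpu fd:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: set the guest's timer control register via KVM_SET_ONE_REG. */
static int set_guest_tcr(int vcpu_fd, uint32_t tcr)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_TCR,
		.addr = (uintptr_t)&tcr,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}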
092d62ee BB |
1529 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1530 | struct kvm_guest_debug *dbg) | |
1531 | { | |
1532 | return -EINVAL; | |
1533 | } | |
1534 | ||
bbf45ba5 HB |
1535 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1536 | { | |
1537 | return -EOPNOTSUPP; | |
1538 | } | |
1539 | ||
1540 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1541 | { | |
1542 | return -EOPNOTSUPP; | |
1543 | } | |
1544 | ||
bbf45ba5 HB |
1545 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1546 | struct kvm_translation *tr) | |
1547 | { | |
98001d8d AK |
1548 | int r; |
1549 | ||
98001d8d | 1550 | r = kvmppc_core_vcpu_translate(vcpu, tr); |
98001d8d | 1551 | return r; |
bbf45ba5 | 1552 | } |
d9fbd03d | 1553 | |
4e755758 AG |
1554 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
1555 | { | |
1556 | return -EOPNOTSUPP; | |
1557 | } | |
1558 | ||
a66b48c3 PM |
1559 | void kvmppc_core_free_memslot(struct kvm_memory_slot *free, |
1560 | struct kvm_memory_slot *dont) | |
1561 | { | |
1562 | } | |
1563 | ||
1564 | int kvmppc_core_create_memslot(struct kvm_memory_slot *slot, | |
1565 | unsigned long npages) | |
1566 | { | |
1567 | return 0; | |
1568 | } | |
1569 | ||
f9e0554d | 1570 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
a66b48c3 | 1571 | struct kvm_memory_slot *memslot, |
f9e0554d PM |
1572 | struct kvm_userspace_memory_region *mem) |
1573 | { | |
1574 | return 0; | |
1575 | } | |
1576 | ||
1577 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | |
dfe49dbd | 1578 | struct kvm_userspace_memory_region *mem, |
8482644a | 1579 | const struct kvm_memory_slot *old) |
dfe49dbd PM |
1580 | { |
1581 | } | |
1582 | ||
1583 | void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) | |
f9e0554d PM |
1584 | { |
1585 | } | |
1586 | ||
38f98824 MC |
1587 | void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr) |
1588 | { | |
1589 | #if defined(CONFIG_64BIT) | |
1590 | vcpu->arch.epcr = new_epcr; | |
1591 | #ifdef CONFIG_KVM_BOOKE_HV | |
1592 | vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; | |
1593 | if (vcpu->arch.epcr & SPRN_EPCR_ICM) | |
1594 | vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; | |
1595 | #endif | |
1596 | #endif | |
1597 | } | |
1598 | ||
dfd4d47e SW |
1599 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) |
1600 | { | |
1601 | vcpu->arch.tcr = new_tcr; | |
f61c94bb | 1602 | arm_next_watchdog(vcpu); |
dfd4d47e SW |
1603 | update_timer_ints(vcpu); |
1604 | } | |
1605 | ||
1606 | void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | |
1607 | { | |
1608 | set_bits(tsr_bits, &vcpu->arch.tsr); | |
1609 | smp_wmb(); | |
1610 | kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); | |
1611 | kvm_vcpu_kick(vcpu); | |
1612 | } | |
1613 | ||
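The ordering here matters: set_bits() publishes the new TSR state, smp_wmb() orders that store before the request flag, and the kick forces the vcpu out of the guest. The consuming side (in the booke run path, outside this section) then re-evaluates timer interrupts before the next guest entry, roughly along these lines:

/* Sketch of the consumer pairing with the smp_wmb() above. */
if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
	update_timer_ints(vcpu);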
1614 | void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | |
1615 | { | |
1616 | clear_bits(tsr_bits, &vcpu->arch.tsr); | |
f61c94bb BB |
1617 | |
1618 | /* | |
1619 | * The watchdog may have been left stopped at its final | |
1620 | * expiration stage; re-arm it when ENW or WIS is cleared. | |
1621 | */ | |
1622 | if (tsr_bits & (TSR_ENW | TSR_WIS)) | |
1623 | arm_next_watchdog(vcpu); | |
1624 | ||
dfd4d47e SW |
1625 | update_timer_ints(vcpu); |
1626 | } | |
1627 | ||
1628 | void kvmppc_decrementer_func(unsigned long data) | |
1629 | { | |
1630 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; | |
1631 | ||
21bd000a BB |
1632 | if (vcpu->arch.tcr & TCR_ARE) { |
1633 | vcpu->arch.dec = vcpu->arch.decar; | |
1634 | kvmppc_emulate_dec(vcpu); | |
1635 | } | |
1636 | ||
dfd4d47e SW |
1637 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); |
1638 | } | |
1639 | ||
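The TCR[ARE] branch above emulates the Book E auto-reload decrementer: on expiry, DEC is reloaded from DECAR so the timer free-runs instead of stopping at zero. From the guest's side the feature is armed roughly like this (a sketch; period_ticks is a placeholder value):

/* Sketch of guest code arming a periodic decrementer:
 * DECAR supplies the reload value, TCR[ARE] enables the
 * auto-reload, TCR[DIE] enables decrementer interrupts. */
mtspr(SPRN_DECAR, period_ticks);
mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE | TCR_DIE);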
94fa9d99 SW |
1640 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
1641 | { | |
a47d72f3 | 1642 | vcpu->cpu = smp_processor_id(); |
d30f6e48 | 1643 | current->thread.kvm_vcpu = vcpu; |
94fa9d99 SW |
1644 | } |
1645 | ||
1646 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | |
1647 | { | |
d30f6e48 | 1648 | current->thread.kvm_vcpu = NULL; |
a47d72f3 | 1649 | vcpu->cpu = -1; |
94fa9d99 SW |
1650 | } |
1651 | ||
2986b8c7 | 1652 | int __init kvmppc_booke_init(void) |
d9fbd03d | 1653 | { |
d30f6e48 | 1654 | #ifndef CONFIG_KVM_BOOKE_HV |
d9fbd03d | 1655 | unsigned long ivor[16]; |
1d542d9c | 1656 | unsigned long *handler = kvmppc_booke_handler_addr; |
d9fbd03d | 1657 | unsigned long max_ivor = 0; |
1d542d9c | 1658 | unsigned long handler_len; |
d9fbd03d HB |
1659 | int i; |
1660 | ||
1661 | /* We install our own exception handlers by hijacking IVPR. IVPR holds | |
1662 | * only the top 16 address bits, so the handlers need a 64KB-aligned block. */ | |
1663 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, | |
1664 | VCPU_SIZE_ORDER); | |
1665 | if (!kvmppc_booke_handlers) | |
1666 | return -ENOMEM; | |
1667 | ||
1668 | /* XXX make sure our handlers are smaller than Linux's */ | |
1669 | ||
1670 | /* Copy our interrupt handlers to match host IVORs. That way we don't | |
1671 | * have to swap the IVORs on every guest/host transition. */ | |
1672 | ivor[0] = mfspr(SPRN_IVOR0); | |
1673 | ivor[1] = mfspr(SPRN_IVOR1); | |
1674 | ivor[2] = mfspr(SPRN_IVOR2); | |
1675 | ivor[3] = mfspr(SPRN_IVOR3); | |
1676 | ivor[4] = mfspr(SPRN_IVOR4); | |
1677 | ivor[5] = mfspr(SPRN_IVOR5); | |
1678 | ivor[6] = mfspr(SPRN_IVOR6); | |
1679 | ivor[7] = mfspr(SPRN_IVOR7); | |
1680 | ivor[8] = mfspr(SPRN_IVOR8); | |
1681 | ivor[9] = mfspr(SPRN_IVOR9); | |
1682 | ivor[10] = mfspr(SPRN_IVOR10); | |
1683 | ivor[11] = mfspr(SPRN_IVOR11); | |
1684 | ivor[12] = mfspr(SPRN_IVOR12); | |
1685 | ivor[13] = mfspr(SPRN_IVOR13); | |
1686 | ivor[14] = mfspr(SPRN_IVOR14); | |
1687 | ivor[15] = mfspr(SPRN_IVOR15); | |
1688 | ||
1689 | for (i = 0; i < 16; i++) { | |
1690 | if (ivor[i] > ivor[max_ivor]) | |
1d542d9c | 1691 | max_ivor = i; |
d9fbd03d | 1692 | |
1d542d9c | 1693 | handler_len = handler[i + 1] - handler[i]; |
d9fbd03d | 1694 | memcpy((void *)kvmppc_booke_handlers + ivor[i], |
1d542d9c | 1695 | (void *)handler[i], handler_len); |
d9fbd03d | 1696 | } |
1d542d9c BB |
1697 | |
1698 | handler_len = handler[max_ivor + 1] - handler[max_ivor]; | |
1699 | flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + | |
1700 | ivor[max_ivor] + handler_len); | |
d30f6e48 | 1701 | #endif /* !BOOKE_HV */ |
db93f574 | 1702 | return 0; |
d9fbd03d HB |
1703 | } |
1704 | ||
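The reason for the 64KB, 64KB-aligned block becomes clear from how Book E dispatches exceptions: the core concatenates the top 16 bits of IVPR with the 16-byte-aligned offset held in the relevant IVOR, so every handler copied above must land inside a single 64KB window. A sketch of that effective-address computation (helper name hypothetical):

/* Sketch: effective handler address on Book E exception entry. */
static unsigned long booke_handler_ea(unsigned long ivpr,
				      unsigned long ivorn)
{
	return (ivpr & 0xffff0000UL) | (ivorn & 0xfff0UL);
}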
db93f574 | 1705 | void __exit kvmppc_booke_exit(void) |
d9fbd03d HB |
1706 | { |
1707 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | |
1708 | kvm_exit(); | |
1709 | } |