/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
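/*
 * Keep the real (shadow) MSR[SPE] in sync with the guest's MSR[SPE]: load
 * the guest SPE state and set MSR[SPE] in the shadow MSR when the guest
 * enables SPE, and save the state and clear the bit again when the guest
 * turns SPE off.
 */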
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}
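/*
 * Pending interrupts are tracked as priority numbers (BOOKE_IRQPRIO_*) in
 * the vcpu's pending_exceptions bitmap; actual delivery happens later in
 * kvmppc_core_check_exceptions().
 */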
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
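/*
 * Accessors for the guest-visible save/restore and fault registers.  With
 * CONFIG_KVM_BOOKE_HV these live in the guest shadow SPRs (GSRR0/1, GDEAR,
 * GESR); otherwise they are kept in the vcpu's shared area.
 */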
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
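/* Raise or clear the pending decrementer interrupt based on TCR[DIE] and TSR[DIS]. */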
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}
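/*
 * Walk the pending exceptions in priority order and deliver the first one
 * the guest can currently take, then publish to the shared area whether
 * anything is still pending.
 */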
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}
static void kvmppc_check_requests(struct kvm_vcpu *vcpu)
{
	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
			update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
			kvmppc_core_flush_tlb(vcpu);
#endif
	}
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns !0 if a signal is pending
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			kvmppc_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		if (vcpu->mode == EXITING_GUEST_MODE) {
			r = 1;
			break;
		}

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	if (kvmppc_prepare_to_enter(vcpu)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

	kvm_guest_exit();
	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();
	local_irq_enable();
	return ret;
}
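/*
 * Emulate the guest's last instruction and map the emulation result onto a
 * resume action: keep running the guest, exit to userspace for DCR access,
 * or report the failed instruction for debugging.
 */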
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}
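/*
 * Build a minimal pt_regs snapshot of the current host context (stack
 * pointer, MSR, LR and an approximate NIP) so that host interrupt handlers
 * can be called as if from a real exception frame.
 */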
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
/*
 * For interrupts that need to be handled by a host interrupt handler,
 * the corresponding host handler is called from here, in a similar way
 * (though not exactly the same) to how it is called from the low level
 * handlers (such as those in arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

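	/*
	 * Data TLB miss: first reflect the miss into the guest if its own TLB
	 * has no mapping.  If the guest does have a mapping, either populate
	 * the shadow TLB (for real RAM) or emulate the access as MMIO.
	 */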
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		if (kvmppc_prepare_to_enter(vcpu)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}
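/*
 * Timer callback: if auto-reload is enabled (TCR[ARE]) restart the
 * decrementer from DECAR, then set TSR[DIS] to mark the decrementer
 * interrupt as pending.
 */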
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}