KVM: powerpc: Move vector to irqprio resolving to separate function
arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>

#include "timing.h"
#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

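/* Each entry maps a debugfs file name to a counter embedded in struct kvm
 * or struct kvm_vcpu; the generic KVM code creates the files and reads the
 * counters through these offsets. */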
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gpr[i],
		       vcpu->arch.gpr[i+1],
		       vcpu->arch.gpr[i+2],
		       vcpu->arch.gpr[i+3]);
	}
}

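/*
 * vcpu->arch.pending_exceptions doubles as a priority bitmap: bit N set
 * means the exception of priority N (a BOOKE_IRQPRIO_* value) is waiting
 * to be delivered. Queueing is a single atomic set_bit(); delivery order
 * then falls out of the find-first-bit scan in
 * kvmppc_core_deliver_interrupts(). Queueing a decrementer, for example,
 * is just:
 *
 *	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
 */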
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

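/*
 * msr_mask below lists the MSR bits a given interrupt class does *not*
 * clear on delivery: noncritical interrupts preserve CE, ME and DE and
 * block only EE (and the other cleared bits); critical interrupts
 * preserve only ME; machine checks clear everything, including ME.
 */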
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;

	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

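	/* Note: __ffs() is formally undefined for a zero word. When nothing
	 * is pending we rely on the result exceeding BOOKE_IRQPRIO_MAX so
	 * the loop body never runs; the powerpc implementation behaves that
	 * way in practice. */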
	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}
}

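/*
 * A worked example of the resume encoding described below: the signal
 * check at the bottom of kvmppc_handle_exit() builds
 * (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV) to force a return
 * to userspace while preserving the non-volatile-reload flag.
 */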
/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must
			 * be handled by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

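	/* The two TLB-miss cases below resolve a miss one of three ways:
	 * reflect it to the guest (no guest TLB entry), map the page into
	 * the shadow TLB ourselves (guest entry backed by real RAM), or
	 * treat the access as MMIO (guest entry not backed by RAM). */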
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* Clear IAC events in the DBSR register. DBSR bits are
		 * write-one-to-clear, so writing the masked value back
		 * acknowledges exactly those events. */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

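/* Note that the MSR is set through kvmppc_set_msr() rather than by plain
 * assignment: an MSR write may have side effects (for instance, previously
 * masked interrupts becoming deliverable) that the core code must see. */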
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

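/*
 * On Book E, the hardware forms each vector address by combining IVPR,
 * which supplies the upper 16 address bits, with the corresponding IVORn
 * offset. Hijacking IVPR therefore requires a 64KB block containing a
 * copy of every handler at the host's IVOR offset; kvmppc_booke_init()
 * below sets that block up.
 */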
int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR
	 * supplies only the upper 16 bits of each vector address, so we
	 * need a 64KB-sized (and thus 64KB-aligned) allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
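	/* The SPR number is encoded as an immediate operand of mfspr, so
	 * each IVOR has to be read with its own instruction; they cannot
	 * be indexed in a loop. */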
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}