/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

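/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall
 * number arrives in r11 and up to four arguments in r3-r6 (masked to
 * 32 bits when MSR_SF is clear). The second return value is written
 * back to r4 here; the primary return value is handed to the caller.
 */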
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

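/*
 * Run the instruction emulator for a trapped instruction and map the
 * emulation result onto a RESUME_* action: keep running the guest on
 * EMULATE_DONE, or exit to userspace with KVM_EXIT_MMIO when the
 * access has to be completed there.
 */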
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

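/*
 * Report which optional KVM capabilities this build supports; the set
 * depends on the configured target (BookE/e500 vs. Book3S, and HV vs.
 * non-HV).
 */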
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
        case KVM_CAP_SYNC_MMU:
                r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
                r = num_present_cpus();
#else
                r = num_online_cpus();
#endif
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        default:
                r = 0;
                break;
        }
        return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

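/*
 * Complete a guest MMIO load once userspace has filled in
 * run->mmio.data: convert for endianness, apply any requested sign
 * extension, and store the result into the destination register
 * recorded in vcpu->arch.io_gpr (GPR, FPR or QPR).
 */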
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

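/*
 * Set up an MMIO load exit: describe the access in the kvm_run MMIO
 * block, remember the destination register, and return EMULATE_DO_MMIO
 * so the vcpu drops out to userspace to perform the read.
 */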
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

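/*
 * Set up an MMIO store exit: place the value to be written into
 * run->mmio.data in the required byte order and return EMULATE_DO_MMIO
 * so userspace can complete the write.
 */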
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

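/*
 * Entry point for the KVM_RUN ioctl: first complete any pending MMIO,
 * DCR, OSI or hypercall exit using the results userspace left in
 * kvm_run, then enter the guest via kvmppc_vcpu_run().
 */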
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

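/*
 * Enable an optional per-vcpu capability (KVM_ENABLE_CAP), then re-run
 * the sanity check since the new setting can change what constitutes a
 * valid vcpu configuration.
 */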
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#ifdef CONFIG_KVM_E500
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif

        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

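/*
 * Fill in the instruction sequence (lis/ori/sc/nop) that a guest should
 * use to make KVM hypercalls; reported to userspace via
 * KVM_PPC_GET_PVINFO.
 */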
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}