arch/powerpc/kvm/powerpc.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *
4  * Copyright IBM Corp. 2007
5  *
6  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7  *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8  */
9
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <linux/of.h>
23 #include <asm/cputable.h>
24 #include <linux/uaccess.h>
25 #include <asm/kvm_ppc.h>
26 #include <asm/cputhreads.h>
27 #include <asm/irqflags.h>
28 #include <asm/iommu.h>
29 #include <asm/switch_to.h>
30 #include <asm/xive.h>
31 #ifdef CONFIG_PPC_PSERIES
32 #include <asm/hvcall.h>
33 #include <asm/plpar_wrappers.h>
34 #endif
35 #include <asm/ultravisor.h>
36 #include <asm/setup.h>
37
38 #include "timing.h"
39 #include "irq.h"
40 #include "../mm/mmu_decl.h"
41
42 #define CREATE_TRACE_POINTS
43 #include "trace.h"
44
45 struct kvmppc_ops *kvmppc_hv_ops;
46 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
47 struct kvmppc_ops *kvmppc_pr_ops;
48 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
49
50
51 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
52 {
53         return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
54 }
55
56 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
57 {
58         return kvm_arch_vcpu_runnable(vcpu);
59 }
60
61 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
62 {
63         return false;
64 }
65
66 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
67 {
68         return 1;
69 }
70
71 /*
72  * Common checks before entering the guest world.  Called with interrupts
73  * enabled; interrupts are hard-disabled here before the checks are made.
74  *
75  * returns:
76  *
77  * == 1 if we're ready to enter the guest (interrupts left hard-disabled)
78  * <= 0 if we need to go back to the host with that return value (interrupts re-enabled)
79  */
80 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
81 {
82         int r;
83
84         WARN_ON(irqs_disabled());
85         hard_irq_disable();
86
87         while (true) {
88                 if (need_resched()) {
89                         local_irq_enable();
90                         cond_resched();
91                         hard_irq_disable();
92                         continue;
93                 }
94
95                 if (signal_pending(current)) {
96                         kvmppc_account_exit(vcpu, SIGNAL_EXITS);
97                         vcpu->run->exit_reason = KVM_EXIT_INTR;
98                         r = -EINTR;
99                         break;
100                 }
101
102                 vcpu->mode = IN_GUEST_MODE;
103
104                 /*
105                  * Reading vcpu->requests must happen after setting vcpu->mode,
106                  * so we don't miss a request because the requester sees
107                  * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
108                  * before next entering the guest (and thus doesn't IPI).
109                  * This also orders the write to mode from any reads
110                  * to the page tables done while the VCPU is running.
111                  * Please see the comment in kvm_flush_remote_tlbs.
112                  */
113                 smp_mb();
114
115                 if (kvm_request_pending(vcpu)) {
116                         /* Make sure we process requests while preemptible */
117                         local_irq_enable();
118                         trace_kvm_check_requests(vcpu);
119                         r = kvmppc_core_check_requests(vcpu);
120                         hard_irq_disable();
121                         if (r > 0)
122                                 continue;
123                         break;
124                 }
125
126                 if (kvmppc_core_prepare_to_enter(vcpu)) {
127                         /* Interrupts were re-enabled in the meantime, so
128                            we are back at square one. */
129                         continue;
130                 }
131
132                 guest_enter_irqoff();
133                 return 1;
134         }
135
136         /* return to host */
137         local_irq_enable();
138         return r;
139 }
140 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
141
142 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
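/*
 * The shared (magic) page is kept in the guest's current endianness.  When
 * the guest switches endianness (see KVM_HC_PPC_MAP_MAGIC_PAGE below), every
 * field has to be byte-swapped so that accesses in the new byte order still
 * read the values that were written in the old one.
 */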
143 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
144 {
145         struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
146         int i;
147
148         shared->sprg0 = swab64(shared->sprg0);
149         shared->sprg1 = swab64(shared->sprg1);
150         shared->sprg2 = swab64(shared->sprg2);
151         shared->sprg3 = swab64(shared->sprg3);
152         shared->srr0 = swab64(shared->srr0);
153         shared->srr1 = swab64(shared->srr1);
154         shared->dar = swab64(shared->dar);
155         shared->msr = swab64(shared->msr);
156         shared->dsisr = swab32(shared->dsisr);
157         shared->int_pending = swab32(shared->int_pending);
158         for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
159                 shared->sr[i] = swab32(shared->sr[i]);
160 }
161 #endif
162
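/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in r11, up to four arguments in r3-r6 (truncated to 32 bits when
 * the guest is not in 64-bit mode), the second return value is passed back
 * in r4, and the primary status is this function's return value.
 */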
163 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
164 {
165         int nr = kvmppc_get_gpr(vcpu, 11);
166         int r;
167         unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
168         unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
169         unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
170         unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
171         unsigned long r2 = 0;
172
173         if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
174                 /* 32 bit mode */
175                 param1 &= 0xffffffff;
176                 param2 &= 0xffffffff;
177                 param3 &= 0xffffffff;
178                 param4 &= 0xffffffff;
179         }
180
181         switch (nr) {
182         case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
183         {
184 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
185                 /* Book3S can be little endian; work out the guest's endianness here */
186                 int shared_big_endian = true;
187                 if (vcpu->arch.intr_msr & MSR_LE)
188                         shared_big_endian = false;
189                 if (shared_big_endian != vcpu->arch.shared_big_endian)
190                         kvmppc_swab_shared(vcpu);
191                 vcpu->arch.shared_big_endian = shared_big_endian;
192 #endif
193
194                 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
195                         /*
196                          * Older versions of the Linux magic page code had
197                          * a bug where they would map their trampoline code
198                          * NX. If that's the case, remove !PR NX capability.
199                          */
200                         vcpu->arch.disable_kernel_nx = true;
201                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
202                 }
203
204                 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
205                 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
206
207 #ifdef CONFIG_PPC_64K_PAGES
208                 /*
209                  * Make sure our 4k magic page sits at the same offset within
210                  * a 64k page in the guest as it does within the host's page.
211                  */
212                 if ((vcpu->arch.magic_page_pa & 0xf000) !=
213                     ((ulong)vcpu->arch.shared & 0xf000)) {
214                         void *old_shared = vcpu->arch.shared;
215                         ulong shared = (ulong)vcpu->arch.shared;
216                         void *new_shared;
217
218                         shared &= PAGE_MASK;
219                         shared |= vcpu->arch.magic_page_pa & 0xf000;
220                         new_shared = (void*)shared;
221                         memcpy(new_shared, old_shared, 0x1000);
222                         vcpu->arch.shared = new_shared;
223                 }
224 #endif
225
226                 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
227
228                 r = EV_SUCCESS;
229                 break;
230         }
231         case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
232                 r = EV_SUCCESS;
233 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
234                 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
235 #endif
236
237                 /* Second return value is in r4 */
238                 break;
239         case EV_HCALL_TOKEN(EV_IDLE):
240                 r = EV_SUCCESS;
241                 kvm_vcpu_halt(vcpu);
242                 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
243                 break;
244         default:
245                 r = EV_UNIMPLEMENTED;
246                 break;
247         }
248
249         kvmppc_set_gpr(vcpu, 4, r2);
250
251         return r;
252 }
253 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
254
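/*
 * Basic sanity check of the vcpu configuration before it is allowed to run:
 * a PVR must have been set, PAPR mode requires a Book3S-64 vcpu, and HV KVM
 * currently only supports PAPR guests.  The result is cached in arch.sane.
 */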
255 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
256 {
257         int r = false;
258
259         /* We have to know what CPU to virtualize */
260         if (!vcpu->arch.pvr)
261                 goto out;
262
263         /* PAPR only works with book3s_64 */
264         if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
265                 goto out;
266
267         /* HV KVM can only do PAPR mode for now */
268         if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
269                 goto out;
270
271 #ifdef CONFIG_KVM_BOOKE_HV
272         if (!cpu_has_feature(CPU_FTR_EMB_HV))
273                 goto out;
274 #endif
275
276         r = true;
277
278 out:
279         vcpu->arch.sane = r;
280         return r ? 0 : -EINVAL;
281 }
282 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
283
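/*
 * Emulate a load/store instruction that faulted on emulated (device) memory.
 * The emulation result is translated into a RESUME_* code: completed
 * emulation resumes the guest, EMULATE_DO_MMIO exits to userspace with
 * KVM_EXIT_MMIO, and an unrecognized instruction injects an interrupt into
 * the guest instead of failing the vcpu.
 */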
284 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
285 {
286         enum emulation_result er;
287         int r;
288
289         er = kvmppc_emulate_loadstore(vcpu);
290         switch (er) {
291         case EMULATE_DONE:
292                 /* Future optimization: only reload non-volatiles if they were
293                  * actually modified. */
294                 r = RESUME_GUEST_NV;
295                 break;
296         case EMULATE_AGAIN:
297                 r = RESUME_GUEST;
298                 break;
299         case EMULATE_DO_MMIO:
300                 vcpu->run->exit_reason = KVM_EXIT_MMIO;
301                 /* We must reload nonvolatiles because "update" load/store
302                  * instructions modify register state. */
303                 /* Future optimization: only reload non-volatiles if they were
304                  * actually modified. */
305                 r = RESUME_HOST_NV;
306                 break;
307         case EMULATE_FAIL:
308         {
309                 u32 last_inst;
310
311                 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
312                 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
313                                       last_inst);
314
315                 /*
316                  * Injecting a Data Storage interrupt here is a bit more
317                  * accurate, since the instruction that caused the
318                  * access may still be a valid one.
319                  */
320                 if (!IS_ENABLED(CONFIG_BOOKE)) {
321                         ulong dsisr = DSISR_BADACCESS;
322
323                         if (vcpu->mmio_is_write)
324                                 dsisr |= DSISR_ISSTORE;
325
326                         kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
327                 } else {
328                         /*
329                          * BookE does not send a SIGBUS on a bad
330                          * fault, so use a Program interrupt instead
331                          * to avoid a fault loop.
332                          */
333                         kvmppc_core_queue_program(vcpu, 0);
334                 }
335
336                 r = RESUME_GUEST;
337                 break;
338         }
339         default:
340                 WARN_ON(1);
341                 r = RESUME_GUEST;
342         }
343
344         return r;
345 }
346 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
347
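/*
 * kvmppc_st()/kvmppc_ld() below store to / load from a guest effective
 * address: the address is translated with kvmppc_xlate(), permissions are
 * checked, accesses to the magic page are serviced directly from the shared
 * page, and everything else goes through kvm_write_guest()/kvm_read_guest().
 */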
348 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
349               bool data)
350 {
351         ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
352         struct kvmppc_pte pte;
353         int r = -EINVAL;
354
355         vcpu->stat.st++;
356
357         if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
358                 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
359                                                             size);
360
361         if ((!r) || (r == -EAGAIN))
362                 return r;
363
364         r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
365                          XLATE_WRITE, &pte);
366         if (r < 0)
367                 return r;
368
369         *eaddr = pte.raddr;
370
371         if (!pte.may_write)
372                 return -EPERM;
373
374         /* Magic page override */
375         if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
376             ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
377             !(kvmppc_get_msr(vcpu) & MSR_PR)) {
378                 void *magic = vcpu->arch.shared;
379                 magic += pte.eaddr & 0xfff;
380                 memcpy(magic, ptr, size);
381                 return EMULATE_DONE;
382         }
383
384         if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
385                 return EMULATE_DO_MMIO;
386
387         return EMULATE_DONE;
388 }
389 EXPORT_SYMBOL_GPL(kvmppc_st);
390
391 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
392                       bool data)
393 {
394         ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
395         struct kvmppc_pte pte;
396         int rc = -EINVAL;
397
398         vcpu->stat.ld++;
399
400         if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
401                 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
402                                                               size);
403
404         if ((!rc) || (rc == -EAGAIN))
405                 return rc;
406
407         rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
408                           XLATE_READ, &pte);
409         if (rc)
410                 return rc;
411
412         *eaddr = pte.raddr;
413
414         if (!pte.may_read)
415                 return -EPERM;
416
417         if (!data && !pte.may_execute)
418                 return -ENOEXEC;
419
420         /* Magic page override */
421         if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
422             ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
423             !(kvmppc_get_msr(vcpu) & MSR_PR)) {
424                 void *magic = vcpu->arch.shared;
425                 magic += pte.eaddr & 0xfff;
426                 memcpy(ptr, magic, size);
427                 return EMULATE_DONE;
428         }
429
430         kvm_vcpu_srcu_read_lock(vcpu);
431         rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
432         kvm_vcpu_srcu_read_unlock(vcpu);
433         if (rc)
434                 return EMULATE_DO_MMIO;
435
436         return EMULATE_DONE;
437 }
438 EXPORT_SYMBOL_GPL(kvmppc_ld);
439
440 int kvm_arch_hardware_enable(void)
441 {
442         return 0;
443 }
444
445 int kvm_arch_hardware_setup(void *opaque)
446 {
447         return 0;
448 }
449
450 int kvm_arch_check_processor_compat(void *opaque)
451 {
452         return kvmppc_core_check_processor_compat();
453 }
454
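/*
 * VM creation: pick the HV or PR implementation based on the VM type passed
 * to KVM_CREATE_VM.  Type 0 defaults to HV when that module is available and
 * falls back to PR; an explicit KVM_VM_PPC_HV or KVM_VM_PPC_PR requires the
 * matching module.  A reference on the chosen module is held for the VM's
 * lifetime and dropped again in kvm_arch_destroy_vm().
 */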
455 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
456 {
457         struct kvmppc_ops *kvm_ops = NULL;
458         int r;
459
460         /*
461          * If both HV and PR are enabled, the default is HV.
462          */
463         if (type == 0) {
464                 if (kvmppc_hv_ops)
465                         kvm_ops = kvmppc_hv_ops;
466                 else
467                         kvm_ops = kvmppc_pr_ops;
468                 if (!kvm_ops)
469                         goto err_out;
470         } else  if (type == KVM_VM_PPC_HV) {
471                 if (!kvmppc_hv_ops)
472                         goto err_out;
473                 kvm_ops = kvmppc_hv_ops;
474         } else if (type == KVM_VM_PPC_PR) {
475                 if (!kvmppc_pr_ops)
476                         goto err_out;
477                 kvm_ops = kvmppc_pr_ops;
478         } else
479                 goto err_out;
480
481         if (!try_module_get(kvm_ops->owner))
482                 return -ENOENT;
483
484         kvm->arch.kvm_ops = kvm_ops;
485         r = kvmppc_core_init_vm(kvm);
486         if (r)
487                 module_put(kvm_ops->owner);
488         return r;
489 err_out:
490         return -EINVAL;
491 }
492
493 void kvm_arch_destroy_vm(struct kvm *kvm)
494 {
495 #ifdef CONFIG_KVM_XICS
496         /*
497          * We call kick_all_cpus_sync() to ensure that all
498          * CPUs have executed any pending IPIs before we
499          * continue and free VCPUs structures below.
500          */
501         if (is_kvmppc_hv_enabled(kvm))
502                 kick_all_cpus_sync();
503 #endif
504
505         kvm_destroy_vcpus(kvm);
506
507         mutex_lock(&kvm->lock);
508
509         kvmppc_core_destroy_vm(kvm);
510
511         mutex_unlock(&kvm->lock);
512
513         /* drop the module reference */
514         module_put(kvm->arch.kvm_ops->owner);
515 }
516
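/*
 * KVM_CHECK_EXTENSION handler.  Several capabilities depend on whether the
 * VM runs in HV or PR mode; without a VM we can only guess, so assume HV
 * whenever the HV module is loaded and refine the answer once a VM exists.
 */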
517 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
518 {
519         int r;
520         /* Assume we're using HV mode when the HV module is loaded */
521         int hv_enabled = kvmppc_hv_ops ? 1 : 0;
522
523         if (kvm) {
524                 /*
525                  * Hooray - we know which VM type we're running on. Depend on
526                  * that rather than the guess above.
527                  */
528                 hv_enabled = is_kvmppc_hv_enabled(kvm);
529         }
530
531         switch (ext) {
532 #ifdef CONFIG_BOOKE
533         case KVM_CAP_PPC_BOOKE_SREGS:
534         case KVM_CAP_PPC_BOOKE_WATCHDOG:
535         case KVM_CAP_PPC_EPR:
536 #else
537         case KVM_CAP_PPC_SEGSTATE:
538         case KVM_CAP_PPC_HIOR:
539         case KVM_CAP_PPC_PAPR:
540 #endif
541         case KVM_CAP_PPC_UNSET_IRQ:
542         case KVM_CAP_PPC_IRQ_LEVEL:
543         case KVM_CAP_ENABLE_CAP:
544         case KVM_CAP_ONE_REG:
545         case KVM_CAP_IOEVENTFD:
546         case KVM_CAP_DEVICE_CTRL:
547         case KVM_CAP_IMMEDIATE_EXIT:
548         case KVM_CAP_SET_GUEST_DEBUG:
549                 r = 1;
550                 break;
551         case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
552         case KVM_CAP_PPC_PAIRED_SINGLES:
553         case KVM_CAP_PPC_OSI:
554         case KVM_CAP_PPC_GET_PVINFO:
555 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
556         case KVM_CAP_SW_TLB:
557 #endif
558                 /* We support this only for PR */
559                 r = !hv_enabled;
560                 break;
561 #ifdef CONFIG_KVM_MPIC
562         case KVM_CAP_IRQ_MPIC:
563                 r = 1;
564                 break;
565 #endif
566
567 #ifdef CONFIG_PPC_BOOK3S_64
568         case KVM_CAP_SPAPR_TCE:
569         case KVM_CAP_SPAPR_TCE_64:
570                 r = 1;
571                 break;
572         case KVM_CAP_SPAPR_TCE_VFIO:
573                 r = !!cpu_has_feature(CPU_FTR_HVMODE);
574                 break;
575         case KVM_CAP_PPC_RTAS:
576         case KVM_CAP_PPC_FIXUP_HCALL:
577         case KVM_CAP_PPC_ENABLE_HCALL:
578 #ifdef CONFIG_KVM_XICS
579         case KVM_CAP_IRQ_XICS:
580 #endif
581         case KVM_CAP_PPC_GET_CPU_CHAR:
582                 r = 1;
583                 break;
584 #ifdef CONFIG_KVM_XIVE
585         case KVM_CAP_PPC_IRQ_XIVE:
586                 /*
587                  * We need XIVE to be enabled on the platform (which implies
588                  * a POWER9 processor) and to be running on the PowerNV
589                  * platform, as nested XIVE is not yet supported.
590                  */
591                 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
592                         kvmppc_xive_native_supported();
593                 break;
594 #endif
595
596         case KVM_CAP_PPC_ALLOC_HTAB:
597                 r = hv_enabled;
598                 break;
599 #endif /* CONFIG_PPC_BOOK3S_64 */
600 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
601         case KVM_CAP_PPC_SMT:
602                 r = 0;
603                 if (kvm) {
604                         if (kvm->arch.emul_smt_mode > 1)
605                                 r = kvm->arch.emul_smt_mode;
606                         else
607                                 r = kvm->arch.smt_mode;
608                 } else if (hv_enabled) {
609                         if (cpu_has_feature(CPU_FTR_ARCH_300))
610                                 r = 1;
611                         else
612                                 r = threads_per_subcore;
613                 }
614                 break;
615         case KVM_CAP_PPC_SMT_POSSIBLE:
616                 r = 1;
617                 if (hv_enabled) {
618                         if (!cpu_has_feature(CPU_FTR_ARCH_300))
619                                 r = ((threads_per_subcore << 1) - 1);
620                         else
621                                 /* P9 can emulate dbells, so allow any mode */
622                                 r = 8 | 4 | 2 | 1;
623                 }
624                 break;
625         case KVM_CAP_PPC_RMA:
626                 r = 0;
627                 break;
628         case KVM_CAP_PPC_HWRNG:
629                 r = kvmppc_hwrng_present();
630                 break;
631         case KVM_CAP_PPC_MMU_RADIX:
632                 r = !!(hv_enabled && radix_enabled());
633                 break;
634         case KVM_CAP_PPC_MMU_HASH_V3:
635                 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
636                        kvmppc_hv_ops->hash_v3_possible());
637                 break;
638         case KVM_CAP_PPC_NESTED_HV:
639                 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
640                        !kvmppc_hv_ops->enable_nested(NULL));
641                 break;
642 #endif
643         case KVM_CAP_SYNC_MMU:
644 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
645                 r = hv_enabled;
646 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
647                 r = 1;
648 #else
649                 r = 0;
650 #endif
651                 break;
652 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
653         case KVM_CAP_PPC_HTAB_FD:
654                 r = hv_enabled;
655                 break;
656 #endif
657         case KVM_CAP_NR_VCPUS:
658                 /*
659                  * Recommending a number of CPUs is somewhat arbitrary; we
660                  * return the number of present CPUs for -HV (since a host
661                  * will have secondary threads "offline"), and for other KVM
662                  * implementations just count online CPUs.
663                  */
664                 if (hv_enabled)
665                         r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
666                 else
667                         r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
668                 break;
669         case KVM_CAP_MAX_VCPUS:
670                 r = KVM_MAX_VCPUS;
671                 break;
672         case KVM_CAP_MAX_VCPU_ID:
673                 r = KVM_MAX_VCPU_IDS;
674                 break;
675 #ifdef CONFIG_PPC_BOOK3S_64
676         case KVM_CAP_PPC_GET_SMMU_INFO:
677                 r = 1;
678                 break;
679         case KVM_CAP_SPAPR_MULTITCE:
680                 r = 1;
681                 break;
682         case KVM_CAP_SPAPR_RESIZE_HPT:
683                 r = !!hv_enabled;
684                 break;
685 #endif
686 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
687         case KVM_CAP_PPC_FWNMI:
688                 r = hv_enabled;
689                 break;
690 #endif
691 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
692         case KVM_CAP_PPC_HTM:
693                 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
694                      (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
695                 break;
696 #endif
697 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
698         case KVM_CAP_PPC_SECURE_GUEST:
699                 r = hv_enabled && kvmppc_hv_ops->enable_svm &&
700                         !kvmppc_hv_ops->enable_svm(NULL);
701                 break;
702         case KVM_CAP_PPC_DAWR1:
703                 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
704                        !kvmppc_hv_ops->enable_dawr1(NULL));
705                 break;
706         case KVM_CAP_PPC_RPT_INVALIDATE:
707                 r = 1;
708                 break;
709 #endif
710         case KVM_CAP_PPC_AIL_MODE_3:
711                 r = 0;
712                 /*
713                  * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
714                  * The POWER9s can support it if the guest runs in hash mode,
715                  * but QEMU doesn't necessarily query the capability in time.
716                  */
717                 if (hv_enabled) {
718                         if (kvmhv_on_pseries()) {
719                                 if (pseries_reloc_on_exception())
720                                         r = 1;
721                         } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
722                                   !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
723                                 r = 1;
724                         }
725                 }
726                 break;
727         default:
728                 r = 0;
729                 break;
730         }
731         return r;
732
733 }
734
735 long kvm_arch_dev_ioctl(struct file *filp,
736                         unsigned int ioctl, unsigned long arg)
737 {
738         return -EINVAL;
739 }
740
741 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
742 {
743         kvmppc_core_free_memslot(kvm, slot);
744 }
745
746 int kvm_arch_prepare_memory_region(struct kvm *kvm,
747                                    const struct kvm_memory_slot *old,
748                                    struct kvm_memory_slot *new,
749                                    enum kvm_mr_change change)
750 {
751         return kvmppc_core_prepare_memory_region(kvm, old, new, change);
752 }
753
754 void kvm_arch_commit_memory_region(struct kvm *kvm,
755                                    struct kvm_memory_slot *old,
756                                    const struct kvm_memory_slot *new,
757                                    enum kvm_mr_change change)
758 {
759         kvmppc_core_commit_memory_region(kvm, old, new, change);
760 }
761
762 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
763                                    struct kvm_memory_slot *slot)
764 {
765         kvmppc_core_flush_memslot(kvm, slot);
766 }
767
768 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
769 {
770         return 0;
771 }
772
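/*
 * The guest decrementer is emulated with an hrtimer: kvm_arch_vcpu_create()
 * below arms arch.dec_timer, and when it fires this callback hands the
 * expiry to kvmppc_decrementer_func() for the owning vcpu.
 */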
773 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
774 {
775         struct kvm_vcpu *vcpu;
776
777         vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
778         kvmppc_decrementer_func(vcpu);
779
780         return HRTIMER_NORESTART;
781 }
782
783 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
784 {
785         int err;
786
787         hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
788         vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
789
790 #ifdef CONFIG_KVM_EXIT_TIMING
791         mutex_init(&vcpu->arch.exit_timing_lock);
792 #endif
793         err = kvmppc_subarch_vcpu_init(vcpu);
794         if (err)
795                 return err;
796
797         err = kvmppc_core_vcpu_create(vcpu);
798         if (err)
799                 goto out_vcpu_uninit;
800
801         rcuwait_init(&vcpu->arch.wait);
802         vcpu->arch.waitp = &vcpu->arch.wait;
803         return 0;
804
805 out_vcpu_uninit:
806         kvmppc_subarch_vcpu_uninit(vcpu);
807         return err;
808 }
809
810 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
811 {
812 }
813
814 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
815 {
816         /* Make sure we're not using the vcpu anymore */
817         hrtimer_cancel(&vcpu->arch.dec_timer);
818
819         switch (vcpu->arch.irq_type) {
820         case KVMPPC_IRQ_MPIC:
821                 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
822                 break;
823         case KVMPPC_IRQ_XICS:
824                 if (xics_on_xive())
825                         kvmppc_xive_cleanup_vcpu(vcpu);
826                 else
827                         kvmppc_xics_free_icp(vcpu);
828                 break;
829         case KVMPPC_IRQ_XIVE:
830                 kvmppc_xive_native_cleanup_vcpu(vcpu);
831                 break;
832         }
833
834         kvmppc_core_vcpu_free(vcpu);
835
836         kvmppc_subarch_vcpu_uninit(vcpu);
837 }
838
839 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
840 {
841         return kvmppc_core_pending_dec(vcpu);
842 }
843
844 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
845 {
846 #ifdef CONFIG_BOOKE
847         /*
848          * vrsave (formerly usprg0) isn't used by Linux, but may
849          * be used by the guest.
850          *
851          * On non-booke this is associated with Altivec and
852          * is handled by code in book3s.c.
853          */
854         mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
855 #endif
856         kvmppc_core_vcpu_load(vcpu, cpu);
857 }
858
859 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
860 {
861         kvmppc_core_vcpu_put(vcpu);
862 #ifdef CONFIG_BOOKE
863         vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
864 #endif
865 }
866
867 /*
868  * irq_bypass_add_producer and irq_bypass_del_producer are only
869  * useful if the architecture supports PCI passthrough.
870  * irq_bypass_stop and irq_bypass_start are not needed and so
871  * kvm_ops are not defined for them.
872  */
873 bool kvm_arch_has_irq_bypass(void)
874 {
875         return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
876                 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
877 }
878
879 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
880                                      struct irq_bypass_producer *prod)
881 {
882         struct kvm_kernel_irqfd *irqfd =
883                 container_of(cons, struct kvm_kernel_irqfd, consumer);
884         struct kvm *kvm = irqfd->kvm;
885
886         if (kvm->arch.kvm_ops->irq_bypass_add_producer)
887                 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
888
889         return 0;
890 }
891
892 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
893                                       struct irq_bypass_producer *prod)
894 {
895         struct kvm_kernel_irqfd *irqfd =
896                 container_of(cons, struct kvm_kernel_irqfd, consumer);
897         struct kvm *kvm = irqfd->kvm;
898
899         if (kvm->arch.kvm_ops->irq_bypass_del_producer)
900                 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
901 }
902
903 #ifdef CONFIG_VSX
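/*
 * MMIO accesses to VSX registers are done one doubleword or word at a time.
 * The helpers below map a register-relative element index to the in-memory
 * element offset, which is mirrored on little-endian hosts: e.g. doubleword
 * index 0 is offset 0 on a big-endian host but offset 1 on a little-endian
 * one.
 */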
904 static inline int kvmppc_get_vsr_dword_offset(int index)
905 {
906         int offset;
907
908         if ((index != 0) && (index != 1))
909                 return -1;
910
911 #ifdef __BIG_ENDIAN
912         offset =  index;
913 #else
914         offset = 1 - index;
915 #endif
916
917         return offset;
918 }
919
920 static inline int kvmppc_get_vsr_word_offset(int index)
921 {
922         int offset;
923
924         if ((index > 3) || (index < 0))
925                 return -1;
926
927 #ifdef __BIG_ENDIAN
928         offset = index;
929 #else
930         offset = 3 - index;
931 #endif
932         return offset;
933 }
934
935 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
936         u64 gpr)
937 {
938         union kvmppc_one_reg val;
939         int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
940         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
941
942         if (offset == -1)
943                 return;
944
945         if (index >= 32) {
946                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
947                 val.vsxval[offset] = gpr;
948                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
949         } else {
950                 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
951         }
952 }
953
954 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
955         u64 gpr)
956 {
957         union kvmppc_one_reg val;
958         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
959
960         if (index >= 32) {
961                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
962                 val.vsxval[0] = gpr;
963                 val.vsxval[1] = gpr;
964                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
965         } else {
966                 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
967                 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
968         }
969 }
970
971 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
972         u32 gpr)
973 {
974         union kvmppc_one_reg val;
975         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
976
977         if (index >= 32) {
978                 val.vsx32val[0] = gpr;
979                 val.vsx32val[1] = gpr;
980                 val.vsx32val[2] = gpr;
981                 val.vsx32val[3] = gpr;
982                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
983         } else {
984                 val.vsx32val[0] = gpr;
985                 val.vsx32val[1] = gpr;
986                 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
987                 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
988         }
989 }
990
991 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
992         u32 gpr32)
993 {
994         union kvmppc_one_reg val;
995         int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
996         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
997         int dword_offset, word_offset;
998
999         if (offset == -1)
1000                 return;
1001
1002         if (index >= 32) {
1003                 val.vval = VCPU_VSX_VR(vcpu, index - 32);
1004                 val.vsx32val[offset] = gpr32;
1005                 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
1006         } else {
1007                 dword_offset = offset / 2;
1008                 word_offset = offset % 2;
1009                 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
1010                 val.vsx32val[word_offset] = gpr32;
1011                 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
1012         }
1013 }
1014 #endif /* CONFIG_VSX */
1015
1016 #ifdef CONFIG_ALTIVEC
1017 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1018                 int index, int element_size)
1019 {
1020         int offset;
1021         int elts = sizeof(vector128)/element_size;
1022
1023         if ((index < 0) || (index >= elts))
1024                 return -1;
1025
1026         if (kvmppc_need_byteswap(vcpu))
1027                 offset = elts - index - 1;
1028         else
1029                 offset = index;
1030
1031         return offset;
1032 }
1033
1034 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1035                 int index)
1036 {
1037         return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1038 }
1039
1040 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1041                 int index)
1042 {
1043         return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1044 }
1045
1046 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1047                 int index)
1048 {
1049         return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1050 }
1051
1052 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1053                 int index)
1054 {
1055         return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1056 }
1057
1058
1059 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1060         u64 gpr)
1061 {
1062         union kvmppc_one_reg val;
1063         int offset = kvmppc_get_vmx_dword_offset(vcpu,
1064                         vcpu->arch.mmio_vmx_offset);
1065         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1066
1067         if (offset == -1)
1068                 return;
1069
1070         val.vval = VCPU_VSX_VR(vcpu, index);
1071         val.vsxval[offset] = gpr;
1072         VCPU_VSX_VR(vcpu, index) = val.vval;
1073 }
1074
1075 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1076         u32 gpr32)
1077 {
1078         union kvmppc_one_reg val;
1079         int offset = kvmppc_get_vmx_word_offset(vcpu,
1080                         vcpu->arch.mmio_vmx_offset);
1081         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1082
1083         if (offset == -1)
1084                 return;
1085
1086         val.vval = VCPU_VSX_VR(vcpu, index);
1087         val.vsx32val[offset] = gpr32;
1088         VCPU_VSX_VR(vcpu, index) = val.vval;
1089 }
1090
1091 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1092         u16 gpr16)
1093 {
1094         union kvmppc_one_reg val;
1095         int offset = kvmppc_get_vmx_hword_offset(vcpu,
1096                         vcpu->arch.mmio_vmx_offset);
1097         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1098
1099         if (offset == -1)
1100                 return;
1101
1102         val.vval = VCPU_VSX_VR(vcpu, index);
1103         val.vsx16val[offset] = gpr16;
1104         VCPU_VSX_VR(vcpu, index) = val.vval;
1105 }
1106
1107 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1108         u8 gpr8)
1109 {
1110         union kvmppc_one_reg val;
1111         int offset = kvmppc_get_vmx_byte_offset(vcpu,
1112                         vcpu->arch.mmio_vmx_offset);
1113         int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1114
1115         if (offset == -1)
1116                 return;
1117
1118         val.vval = VCPU_VSX_VR(vcpu, index);
1119         val.vsx8val[offset] = gpr8;
1120         VCPU_VSX_VR(vcpu, index) = val.vval;
1121 }
1122 #endif /* CONFIG_ALTIVEC */
1123
1124 #ifdef CONFIG_PPC_FPU
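/*
 * Single <-> double precision conversion for FP MMIO is done by the FPU
 * itself: a load in one format followed by a store in the other (lfs/stfd
 * and lfd/stfs) performs the conversion in fr0, so kernel FP has to be
 * enabled and preemption disabled around the asm.
 */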
1125 static inline u64 sp_to_dp(u32 fprs)
1126 {
1127         u64 fprd;
1128
1129         preempt_disable();
1130         enable_kernel_fp();
1131         asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
1132              : "fr0");
1133         preempt_enable();
1134         return fprd;
1135 }
1136
1137 static inline u32 dp_to_sp(u64 fprd)
1138 {
1139         u32 fprs;
1140
1141         preempt_disable();
1142         enable_kernel_fp();
1143         asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
1144              : "fr0");
1145         preempt_enable();
1146         return fprs;
1147 }
1148
1149 #else
1150 #define sp_to_dp(x)     (x)
1151 #define dp_to_sp(x)     (x)
1152 #endif /* CONFIG_PPC_FPU */
1153
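/*
 * Complete an MMIO load once the data is available in run->mmio: undo any
 * host byteswap, apply single-precision extension and sign extension as
 * requested at emulation time, and write the result into the destination
 * encoded in arch.io_gpr (GPR, FPR, QPR, VSX, VMX or nested-guest memory).
 */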
1154 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1155 {
1156         struct kvm_run *run = vcpu->run;
1157         u64 gpr;
1158
1159         if (run->mmio.len > sizeof(gpr))
1160                 return;
1161
1162         if (!vcpu->arch.mmio_host_swabbed) {
1163                 switch (run->mmio.len) {
1164                 case 8: gpr = *(u64 *)run->mmio.data; break;
1165                 case 4: gpr = *(u32 *)run->mmio.data; break;
1166                 case 2: gpr = *(u16 *)run->mmio.data; break;
1167                 case 1: gpr = *(u8 *)run->mmio.data; break;
1168                 }
1169         } else {
1170                 switch (run->mmio.len) {
1171                 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1172                 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1173                 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1174                 case 1: gpr = *(u8 *)run->mmio.data; break;
1175                 }
1176         }
1177
1178         /* conversion between single and double precision */
1179         if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1180                 gpr = sp_to_dp(gpr);
1181
1182         if (vcpu->arch.mmio_sign_extend) {
1183                 switch (run->mmio.len) {
1184 #ifdef CONFIG_PPC64
1185                 case 4:
1186                         gpr = (s64)(s32)gpr;
1187                         break;
1188 #endif
1189                 case 2:
1190                         gpr = (s64)(s16)gpr;
1191                         break;
1192                 case 1:
1193                         gpr = (s64)(s8)gpr;
1194                         break;
1195                 }
1196         }
1197
1198         switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1199         case KVM_MMIO_REG_GPR:
1200                 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1201                 break;
1202         case KVM_MMIO_REG_FPR:
1203                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1204                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1205
1206                 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1207                 break;
1208 #ifdef CONFIG_PPC_BOOK3S
1209         case KVM_MMIO_REG_QPR:
1210                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1211                 break;
1212         case KVM_MMIO_REG_FQPR:
1213                 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1214                 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1215                 break;
1216 #endif
1217 #ifdef CONFIG_VSX
1218         case KVM_MMIO_REG_VSX:
1219                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1220                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1221
1222                 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1223                         kvmppc_set_vsr_dword(vcpu, gpr);
1224                 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1225                         kvmppc_set_vsr_word(vcpu, gpr);
1226                 else if (vcpu->arch.mmio_copy_type ==
1227                                 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1228                         kvmppc_set_vsr_dword_dump(vcpu, gpr);
1229                 else if (vcpu->arch.mmio_copy_type ==
1230                                 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1231                         kvmppc_set_vsr_word_dump(vcpu, gpr);
1232                 break;
1233 #endif
1234 #ifdef CONFIG_ALTIVEC
1235         case KVM_MMIO_REG_VMX:
1236                 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1237                         vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1238
1239                 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1240                         kvmppc_set_vmx_dword(vcpu, gpr);
1241                 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1242                         kvmppc_set_vmx_word(vcpu, gpr);
1243                 else if (vcpu->arch.mmio_copy_type ==
1244                                 KVMPPC_VMX_COPY_HWORD)
1245                         kvmppc_set_vmx_hword(vcpu, gpr);
1246                 else if (vcpu->arch.mmio_copy_type ==
1247                                 KVMPPC_VMX_COPY_BYTE)
1248                         kvmppc_set_vmx_byte(vcpu, gpr);
1249                 break;
1250 #endif
1251 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1252         case KVM_MMIO_REG_NESTED_GPR:
1253                 if (kvmppc_need_byteswap(vcpu))
1254                         gpr = swab64(gpr);
1255                 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1256                                      sizeof(gpr));
1257                 break;
1258 #endif
1259         default:
1260                 BUG();
1261         }
1262 }
1263
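/*
 * Common MMIO load path: record the access in vcpu->run->mmio, then try the
 * in-kernel KVM_MMIO_BUS first; if no in-kernel device claims the address,
 * return EMULATE_DO_MMIO so the exit is handed to userspace, which fills in
 * run->mmio.data and re-enters the vcpu to complete the load.
 */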
1264 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1265                                 unsigned int rt, unsigned int bytes,
1266                                 int is_default_endian, int sign_extend)
1267 {
1268         struct kvm_run *run = vcpu->run;
1269         int idx, ret;
1270         bool host_swabbed;
1271
1272         /* Pity C doesn't have a logical XOR operator */
1273         if (kvmppc_need_byteswap(vcpu)) {
1274                 host_swabbed = is_default_endian;
1275         } else {
1276                 host_swabbed = !is_default_endian;
1277         }
1278
1279         if (bytes > sizeof(run->mmio.data))
1280                 return EMULATE_FAIL;
1281
1282         run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1283         run->mmio.len = bytes;
1284         run->mmio.is_write = 0;
1285
1286         vcpu->arch.io_gpr = rt;
1287         vcpu->arch.mmio_host_swabbed = host_swabbed;
1288         vcpu->mmio_needed = 1;
1289         vcpu->mmio_is_write = 0;
1290         vcpu->arch.mmio_sign_extend = sign_extend;
1291
1292         idx = srcu_read_lock(&vcpu->kvm->srcu);
1293
1294         ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1295                               bytes, &run->mmio.data);
1296
1297         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1298
1299         if (!ret) {
1300                 kvmppc_complete_mmio_load(vcpu);
1301                 vcpu->mmio_needed = 0;
1302                 return EMULATE_DONE;
1303         }
1304
1305         return EMULATE_DO_MMIO;
1306 }
1307
1308 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1309                        unsigned int rt, unsigned int bytes,
1310                        int is_default_endian)
1311 {
1312         return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1313 }
1314 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1315
1316 /* Same as above, but sign extends */
1317 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1318                         unsigned int rt, unsigned int bytes,
1319                         int is_default_endian)
1320 {
1321         return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1322 }
1323
1324 #ifdef CONFIG_VSX
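/*
 * A single VSX load instruction may need several MMIO accesses;
 * mmio_vsx_copy_nums counts how many remain, and each iteration advances the
 * accessed guest physical address and the element offset within the target
 * register.
 */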
1325 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1326                         unsigned int rt, unsigned int bytes,
1327                         int is_default_endian, int mmio_sign_extend)
1328 {
1329         enum emulation_result emulated = EMULATE_DONE;
1330
1331         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1332         if (vcpu->arch.mmio_vsx_copy_nums > 4)
1333                 return EMULATE_FAIL;
1334
1335         while (vcpu->arch.mmio_vsx_copy_nums) {
1336                 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1337                         is_default_endian, mmio_sign_extend);
1338
1339                 if (emulated != EMULATE_DONE)
1340                         break;
1341
1342                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1343
1344                 vcpu->arch.mmio_vsx_copy_nums--;
1345                 vcpu->arch.mmio_vsx_offset++;
1346         }
1347         return emulated;
1348 }
1349 #endif /* CONFIG_VSX */
1350
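/*
 * MMIO store counterpart of __kvmppc_handle_load(): the value is byteswapped
 * into guest order (and converted to single precision if requested), placed
 * in run->mmio.data, and either written to an in-kernel device on
 * KVM_MMIO_BUS or handed to userspace via EMULATE_DO_MMIO.
 */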
1351 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1352                         u64 val, unsigned int bytes, int is_default_endian)
1353 {
1354         struct kvm_run *run = vcpu->run;
1355         void *data = run->mmio.data;
1356         int idx, ret;
1357         bool host_swabbed;
1358
1359         /* Pity C doesn't have a logical XOR operator */
1360         if (kvmppc_need_byteswap(vcpu)) {
1361                 host_swabbed = is_default_endian;
1362         } else {
1363                 host_swabbed = !is_default_endian;
1364         }
1365
1366         if (bytes > sizeof(run->mmio.data))
1367                 return EMULATE_FAIL;
1368
1369         run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1370         run->mmio.len = bytes;
1371         run->mmio.is_write = 1;
1372         vcpu->mmio_needed = 1;
1373         vcpu->mmio_is_write = 1;
1374
1375         if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1376                 val = dp_to_sp(val);
1377
1378         /* Store the value in the lowest bytes of 'data'. */
1379         if (!host_swabbed) {
1380                 switch (bytes) {
1381                 case 8: *(u64 *)data = val; break;
1382                 case 4: *(u32 *)data = val; break;
1383                 case 2: *(u16 *)data = val; break;
1384                 case 1: *(u8  *)data = val; break;
1385                 }
1386         } else {
1387                 switch (bytes) {
1388                 case 8: *(u64 *)data = swab64(val); break;
1389                 case 4: *(u32 *)data = swab32(val); break;
1390                 case 2: *(u16 *)data = swab16(val); break;
1391                 case 1: *(u8  *)data = val; break;
1392                 }
1393         }
1394
1395         idx = srcu_read_lock(&vcpu->kvm->srcu);
1396
1397         ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1398                                bytes, &run->mmio.data);
1399
1400         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1401
1402         if (!ret) {
1403                 vcpu->mmio_needed = 0;
1404                 return EMULATE_DONE;
1405         }
1406
1407         return EMULATE_DO_MMIO;
1408 }
1409 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1410
1411 #ifdef CONFIG_VSX
1412 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1413 {
1414         u32 dword_offset, word_offset;
1415         union kvmppc_one_reg reg;
1416         int vsx_offset = 0;
1417         int copy_type = vcpu->arch.mmio_copy_type;
1418         int result = 0;
1419
1420         switch (copy_type) {
1421         case KVMPPC_VSX_COPY_DWORD:
1422                 vsx_offset =
1423                         kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1424
1425                 if (vsx_offset == -1) {
1426                         result = -1;
1427                         break;
1428                 }
1429
1430                 if (rs < 32) {
1431                         *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1432                 } else {
1433                         reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1434                         *val = reg.vsxval[vsx_offset];
1435                 }
1436                 break;
1437
1438         case KVMPPC_VSX_COPY_WORD:
1439                 vsx_offset =
1440                         kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1441
1442                 if (vsx_offset == -1) {
1443                         result = -1;
1444                         break;
1445                 }
1446
1447                 if (rs < 32) {
1448                         dword_offset = vsx_offset / 2;
1449                         word_offset = vsx_offset % 2;
1450                         reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1451                         *val = reg.vsx32val[word_offset];
1452                 } else {
1453                         reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1454                         *val = reg.vsx32val[vsx_offset];
1455                 }
1456                 break;
1457
1458         default:
1459                 result = -1;
1460                 break;
1461         }
1462
1463         return result;
1464 }
1465
1466 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1467                         int rs, unsigned int bytes, int is_default_endian)
1468 {
1469         u64 val;
1470         enum emulation_result emulated = EMULATE_DONE;
1471
1472         vcpu->arch.io_gpr = rs;
1473
1474         /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
1475         if (vcpu->arch.mmio_vsx_copy_nums > 4)
1476                 return EMULATE_FAIL;
1477
1478         while (vcpu->arch.mmio_vsx_copy_nums) {
1479                 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1480                         return EMULATE_FAIL;
1481
1482                 emulated = kvmppc_handle_store(vcpu,
1483                          val, bytes, is_default_endian);
1484
1485                 if (emulated != EMULATE_DONE)
1486                         break;
1487
1488                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1489
1490                 vcpu->arch.mmio_vsx_copy_nums--;
1491                 vcpu->arch.mmio_vsx_offset++;
1492         }
1493
1494         return emulated;
1495 }
1496
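/*
 * Continue a multi-pass VSX MMIO emulation after userspace has completed the
 * previous access: advance past the just-finished access and either queue
 * the next MMIO exit, resume the guest when done, or report an internal
 * error if emulation fails part-way through.
 */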
1497 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1498 {
1499         struct kvm_run *run = vcpu->run;
1500         enum emulation_result emulated = EMULATE_FAIL;
1501         int r;
1502
1503         vcpu->arch.paddr_accessed += run->mmio.len;
1504
1505         if (!vcpu->mmio_is_write) {
1506                 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1507                          run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1508         } else {
1509                 emulated = kvmppc_handle_vsx_store(vcpu,
1510                          vcpu->arch.io_gpr, run->mmio.len, 1);
1511         }
1512
1513         switch (emulated) {
1514         case EMULATE_DO_MMIO:
1515                 run->exit_reason = KVM_EXIT_MMIO;
1516                 r = RESUME_HOST;
1517                 break;
1518         case EMULATE_FAIL:
1519                 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1520                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1521                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1522                 r = RESUME_HOST;
1523                 break;
1524         default:
1525                 r = RESUME_GUEST;
1526                 break;
1527         }
1528         return r;
1529 }
1530 #endif /* CONFIG_VSX */
1531
1532 #ifdef CONFIG_ALTIVEC
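/*
 * VMX (Altivec) MMIO handling mirrors the VSX path above: element offsets
 * are mirrored for byteswapped guests, and a single instruction may be split
 * into up to two MMIO accesses tracked by mmio_vmx_copy_nums.
 */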
1533 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1534                 unsigned int rt, unsigned int bytes, int is_default_endian)
1535 {
1536         enum emulation_result emulated = EMULATE_DONE;
1537
1538         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1539                 return EMULATE_FAIL;
1540
1541         while (vcpu->arch.mmio_vmx_copy_nums) {
1542                 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1543                                 is_default_endian, 0);
1544
1545                 if (emulated != EMULATE_DONE)
1546                         break;
1547
1548                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1549                 vcpu->arch.mmio_vmx_copy_nums--;
1550                 vcpu->arch.mmio_vmx_offset++;
1551         }
1552
1553         return emulated;
1554 }
1555
1556 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1557 {
1558         union kvmppc_one_reg reg;
1559         int vmx_offset = 0;
1560         int result = 0;
1561
1562         vmx_offset =
1563                 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1564
1565         if (vmx_offset == -1)
1566                 return -1;
1567
1568         reg.vval = VCPU_VSX_VR(vcpu, index);
1569         *val = reg.vsxval[vmx_offset];
1570
1571         return result;
1572 }
1573
1574 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1575 {
1576         union kvmppc_one_reg reg;
1577         int vmx_offset = 0;
1578         int result = 0;
1579
1580         vmx_offset =
1581                 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1582
1583         if (vmx_offset == -1)
1584                 return -1;
1585
1586         reg.vval = VCPU_VSX_VR(vcpu, index);
1587         *val = reg.vsx32val[vmx_offset];
1588
1589         return result;
1590 }
1591
1592 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1593 {
1594         union kvmppc_one_reg reg;
1595         int vmx_offset = 0;
1596         int result = 0;
1597
1598         vmx_offset =
1599                 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1600
1601         if (vmx_offset == -1)
1602                 return -1;
1603
1604         reg.vval = VCPU_VSX_VR(vcpu, index);
1605         *val = reg.vsx16val[vmx_offset];
1606
1607         return result;
1608 }
1609
1610 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1611 {
1612         union kvmppc_one_reg reg;
1613         int vmx_offset = 0;
1614         int result = 0;
1615
1616         vmx_offset =
1617                 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1618
1619         if (vmx_offset == -1)
1620                 return -1;
1621
1622         reg.vval = VCPU_VSX_VR(vcpu, index);
1623         *val = reg.vsx8val[vmx_offset];
1624
1625         return result;
1626 }
1627
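/*
 * Emulate a VMX store that faulted on MMIO.  Mirrors the load path
 * above: each iteration pulls the next element out of the source VR
 * (according to mmio_copy_type) using the helpers above and emits it
 * with kvmppc_handle_store(), advancing paddr_accessed and the element
 * offset as it goes.
 */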
1628 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1629                 unsigned int rs, unsigned int bytes, int is_default_endian)
1630 {
1631         u64 val = 0;
1632         unsigned int index = rs & KVM_MMIO_REG_MASK;
1633         enum emulation_result emulated = EMULATE_DONE;
1634
1635         if (vcpu->arch.mmio_vmx_copy_nums > 2)
1636                 return EMULATE_FAIL;
1637
1638         vcpu->arch.io_gpr = rs;
1639
1640         while (vcpu->arch.mmio_vmx_copy_nums) {
1641                 switch (vcpu->arch.mmio_copy_type) {
1642                 case KVMPPC_VMX_COPY_DWORD:
1643                         if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1644                                 return EMULATE_FAIL;
1645
1646                         break;
1647                 case KVMPPC_VMX_COPY_WORD:
1648                         if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1649                                 return EMULATE_FAIL;
1650                         break;
1651                 case KVMPPC_VMX_COPY_HWORD:
1652                         if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1653                                 return EMULATE_FAIL;
1654                         break;
1655                 case KVMPPC_VMX_COPY_BYTE:
1656                         if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1657                                 return EMULATE_FAIL;
1658                         break;
1659                 default:
1660                         return EMULATE_FAIL;
1661                 }
1662
1663                 emulated = kvmppc_handle_store(vcpu, val, bytes,
1664                                 is_default_endian);
1665                 if (emulated != EMULATE_DONE)
1666                         break;
1667
1668                 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1669                 vcpu->arch.mmio_vmx_copy_nums--;
1670                 vcpu->arch.mmio_vmx_offset++;
1671         }
1672
1673         return emulated;
1674 }
1675
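/*
 * Continue a multi-element VMX MMIO access after userspace has
 * completed the previous element: skip past the bytes already
 * transferred and re-enter the load or store path.  EMULATE_DO_MMIO
 * means another round trip to userspace is needed (RESUME_HOST);
 * EMULATE_FAIL is reported as an internal error.
 */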
1676 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1677 {
1678         struct kvm_run *run = vcpu->run;
1679         enum emulation_result emulated = EMULATE_FAIL;
1680         int r;
1681
1682         vcpu->arch.paddr_accessed += run->mmio.len;
1683
1684         if (!vcpu->mmio_is_write) {
1685                 emulated = kvmppc_handle_vmx_load(vcpu,
1686                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1687         } else {
1688                 emulated = kvmppc_handle_vmx_store(vcpu,
1689                                 vcpu->arch.io_gpr, run->mmio.len, 1);
1690         }
1691
1692         switch (emulated) {
1693         case EMULATE_DO_MMIO:
1694                 run->exit_reason = KVM_EXIT_MMIO;
1695                 r = RESUME_HOST;
1696                 break;
1697         case EMULATE_FAIL:
1698                 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1699                 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1700                 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1701                 r = RESUME_HOST;
1702                 break;
1703         default:
1704                 r = RESUME_GUEST;
1705                 break;
1706         }
1707         return r;
1708 }
1709 #endif /* CONFIG_ALTIVEC */
1710
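/*
 * ONE_REG accessors.  kvmppc_get_one_reg()/kvmppc_set_one_reg() handle
 * most register IDs in the subarch backends; the Altivec registers are
 * handled generically here.  Illustrative userspace usage (a sketch,
 * not code from this file):
 *
 *      __u32 vrsave;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_PPC_VRSAVE,
 *              .addr = (__u64)(unsigned long)&vrsave,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */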
1711 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1712 {
1713         int r = 0;
1714         union kvmppc_one_reg val;
1715         int size;
1716
1717         size = one_reg_size(reg->id);
1718         if (size > sizeof(val))
1719                 return -EINVAL;
1720
1721         r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1722         if (r == -EINVAL) {
1723                 r = 0;
1724                 switch (reg->id) {
1725 #ifdef CONFIG_ALTIVEC
1726                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1727                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1728                                 r = -ENXIO;
1729                                 break;
1730                         }
1731                         val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1732                         break;
1733                 case KVM_REG_PPC_VSCR:
1734                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1735                                 r = -ENXIO;
1736                                 break;
1737                         }
1738                         val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1739                         break;
1740                 case KVM_REG_PPC_VRSAVE:
1741                         val = get_reg_val(reg->id, vcpu->arch.vrsave);
1742                         break;
1743 #endif /* CONFIG_ALTIVEC */
1744                 default:
1745                         r = -EINVAL;
1746                         break;
1747                 }
1748         }
1749
1750         if (r)
1751                 return r;
1752
1753         if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1754                 r = -EFAULT;
1755
1756         return r;
1757 }
1758
1759 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1760 {
1761         int r;
1762         union kvmppc_one_reg val;
1763         int size;
1764
1765         size = one_reg_size(reg->id);
1766         if (size > sizeof(val))
1767                 return -EINVAL;
1768
1769         if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1770                 return -EFAULT;
1771
1772         r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1773         if (r == -EINVAL) {
1774                 r = 0;
1775                 switch (reg->id) {
1776 #ifdef CONFIG_ALTIVEC
1777                 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1778                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1779                                 r = -ENXIO;
1780                                 break;
1781                         }
1782                         vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1783                         break;
1784                 case KVM_REG_PPC_VSCR:
1785                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1786                                 r = -ENXIO;
1787                                 break;
1788                         }
1789                         vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1790                         break;
1791                 case KVM_REG_PPC_VRSAVE:
1792                         if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1793                                 r = -ENXIO;
1794                                 break;
1795                         }
1796                         vcpu->arch.vrsave = set_reg_val(reg->id, val);
1797                         break;
1798 #endif /* CONFIG_ALTIVEC */
1799                 default:
1800                         r = -EINVAL;
1801                         break;
1802                 }
1803         }
1804
1805         return r;
1806 }
1807
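/*
 * Main vcpu run entry point (KVM_RUN).  Before (re)entering the guest
 * it finishes off whatever the previous exit left pending: the data
 * phase of an MMIO load, the remaining elements of a multi-pass
 * VSX/VMX MMIO access, the GPRs returned by an OSI or PAPR hypercall,
 * or an EPR value on BookE.  immediate_exit returns -EINTR without
 * entering the guest.
 */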
1808 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1809 {
1810         struct kvm_run *run = vcpu->run;
1811         int r;
1812
1813         vcpu_load(vcpu);
1814
1815         if (vcpu->mmio_needed) {
1816                 vcpu->mmio_needed = 0;
1817                 if (!vcpu->mmio_is_write)
1818                         kvmppc_complete_mmio_load(vcpu);
1819 #ifdef CONFIG_VSX
1820                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1821                         vcpu->arch.mmio_vsx_copy_nums--;
1822                         vcpu->arch.mmio_vsx_offset++;
1823                 }
1824
1825                 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1826                         r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1827                         if (r == RESUME_HOST) {
1828                                 vcpu->mmio_needed = 1;
1829                                 goto out;
1830                         }
1831                 }
1832 #endif
1833 #ifdef CONFIG_ALTIVEC
1834                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1835                         vcpu->arch.mmio_vmx_copy_nums--;
1836                         vcpu->arch.mmio_vmx_offset++;
1837                 }
1838
1839                 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1840                         r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1841                         if (r == RESUME_HOST) {
1842                                 vcpu->mmio_needed = 1;
1843                                 goto out;
1844                         }
1845                 }
1846 #endif
1847         } else if (vcpu->arch.osi_needed) {
1848                 u64 *gprs = run->osi.gprs;
1849                 int i;
1850
1851                 for (i = 0; i < 32; i++)
1852                         kvmppc_set_gpr(vcpu, i, gprs[i]);
1853                 vcpu->arch.osi_needed = 0;
1854         } else if (vcpu->arch.hcall_needed) {
1855                 int i;
1856
1857                 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1858                 for (i = 0; i < 9; ++i)
1859                         kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1860                 vcpu->arch.hcall_needed = 0;
1861 #ifdef CONFIG_BOOKE
1862         } else if (vcpu->arch.epr_needed) {
1863                 kvmppc_set_epr(vcpu, run->epr.epr);
1864                 vcpu->arch.epr_needed = 0;
1865 #endif
1866         }
1867
1868         kvm_sigset_activate(vcpu);
1869
1870         if (run->immediate_exit)
1871                 r = -EINTR;
1872         else
1873                 r = kvmppc_vcpu_run(vcpu);
1874
1875         kvm_sigset_deactivate(vcpu);
1876
1877 #ifdef CONFIG_ALTIVEC
1878 out:
1879 #endif
1880
1881         /*
1882          * We're already returning to userspace; don't pass the
1883          * RESUME_HOST flag along.
1884          */
1885         if (r > 0)
1886                 r = 0;
1887
1888         vcpu_put(vcpu);
1889         return r;
1890 }
1891
1892 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1893 {
1894         if (irq->irq == KVM_INTERRUPT_UNSET) {
1895                 kvmppc_core_dequeue_external(vcpu);
1896                 return 0;
1897         }
1898
1899         kvmppc_core_queue_external(vcpu, irq);
1900
1901         kvm_vcpu_kick(vcpu);
1902
1903         return 0;
1904 }
1905
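/*
 * Per-vcpu KVM_ENABLE_CAP handling (OSI, PAPR, EPR, watchdog, in-kernel
 * interrupt controllers, FWNMI, ...).  A sketch of how userspace might
 * enable PAPR mode (illustrative only):
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * On success the vcpu is re-validated with kvmppc_sanity_check().
 */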
1906 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1907                                      struct kvm_enable_cap *cap)
1908 {
1909         int r;
1910
1911         if (cap->flags)
1912                 return -EINVAL;
1913
1914         switch (cap->cap) {
1915         case KVM_CAP_PPC_OSI:
1916                 r = 0;
1917                 vcpu->arch.osi_enabled = true;
1918                 break;
1919         case KVM_CAP_PPC_PAPR:
1920                 r = 0;
1921                 vcpu->arch.papr_enabled = true;
1922                 break;
1923         case KVM_CAP_PPC_EPR:
1924                 r = 0;
1925                 if (cap->args[0])
1926                         vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1927                 else
1928                         vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1929                 break;
1930 #ifdef CONFIG_BOOKE
1931         case KVM_CAP_PPC_BOOKE_WATCHDOG:
1932                 r = 0;
1933                 vcpu->arch.watchdog_enabled = true;
1934                 break;
1935 #endif
1936 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1937         case KVM_CAP_SW_TLB: {
1938                 struct kvm_config_tlb cfg;
1939                 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1940
1941                 r = -EFAULT;
1942                 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1943                         break;
1944
1945                 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1946                 break;
1947         }
1948 #endif
1949 #ifdef CONFIG_KVM_MPIC
1950         case KVM_CAP_IRQ_MPIC: {
1951                 struct fd f;
1952                 struct kvm_device *dev;
1953
1954                 r = -EBADF;
1955                 f = fdget(cap->args[0]);
1956                 if (!f.file)
1957                         break;
1958
1959                 r = -EPERM;
1960                 dev = kvm_device_from_filp(f.file);
1961                 if (dev)
1962                         r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1963
1964                 fdput(f);
1965                 break;
1966         }
1967 #endif
1968 #ifdef CONFIG_KVM_XICS
1969         case KVM_CAP_IRQ_XICS: {
1970                 struct fd f;
1971                 struct kvm_device *dev;
1972
1973                 r = -EBADF;
1974                 f = fdget(cap->args[0]);
1975                 if (!f.file)
1976                         break;
1977
1978                 r = -EPERM;
1979                 dev = kvm_device_from_filp(f.file);
1980                 if (dev) {
1981                         if (xics_on_xive())
1982                                 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1983                         else
1984                                 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1985                 }
1986
1987                 fdput(f);
1988                 break;
1989         }
1990 #endif /* CONFIG_KVM_XICS */
1991 #ifdef CONFIG_KVM_XIVE
1992         case KVM_CAP_PPC_IRQ_XIVE: {
1993                 struct fd f;
1994                 struct kvm_device *dev;
1995
1996                 r = -EBADF;
1997                 f = fdget(cap->args[0]);
1998                 if (!f.file)
1999                         break;
2000
2001                 r = -ENXIO;
2002                 if (!xive_enabled())
2003                         break;
2004
2005                 r = -EPERM;
2006                 dev = kvm_device_from_filp(f.file);
2007                 if (dev)
2008                         r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2009                                                             cap->args[1]);
2010
2011                 fdput(f);
2012                 break;
2013         }
2014 #endif /* CONFIG_KVM_XIVE */
2015 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2016         case KVM_CAP_PPC_FWNMI:
2017                 r = -EINVAL;
2018                 if (!is_kvmppc_hv_enabled(vcpu->kvm))
2019                         break;
2020                 r = 0;
2021                 vcpu->kvm->arch.fwnmi_enabled = true;
2022                 break;
2023 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2024         default:
2025                 r = -EINVAL;
2026                 break;
2027         }
2028
2029         if (!r)
2030                 r = kvmppc_sanity_check(vcpu);
2031
2032         return r;
2033 }
2034
2035 bool kvm_arch_intc_initialized(struct kvm *kvm)
2036 {
2037 #ifdef CONFIG_KVM_MPIC
2038         if (kvm->arch.mpic)
2039                 return true;
2040 #endif
2041 #ifdef CONFIG_KVM_XICS
2042         if (kvm->arch.xics || kvm->arch.xive)
2043                 return true;
2044 #endif
2045         return false;
2046 }
2047
2048 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2049                                     struct kvm_mp_state *mp_state)
2050 {
2051         return -EINVAL;
2052 }
2053
2054 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2055                                     struct kvm_mp_state *mp_state)
2056 {
2057         return -EINVAL;
2058 }
2059
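/*
 * Async vcpu ioctls are dispatched by common KVM code without taking
 * the vcpu mutex, so KVM_INTERRUPT can be injected while the vcpu is
 * running.  Everything else returns -ENOIOCTLCMD and is handled on the
 * normal, locked path via kvm_arch_vcpu_ioctl().
 */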
2060 long kvm_arch_vcpu_async_ioctl(struct file *filp,
2061                                unsigned int ioctl, unsigned long arg)
2062 {
2063         struct kvm_vcpu *vcpu = filp->private_data;
2064         void __user *argp = (void __user *)arg;
2065
2066         if (ioctl == KVM_INTERRUPT) {
2067                 struct kvm_interrupt irq;
2068                 if (copy_from_user(&irq, argp, sizeof(irq)))
2069                         return -EFAULT;
2070                 return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2071         }
2072         return -ENOIOCTLCMD;
2073 }
2074
2075 long kvm_arch_vcpu_ioctl(struct file *filp,
2076                          unsigned int ioctl, unsigned long arg)
2077 {
2078         struct kvm_vcpu *vcpu = filp->private_data;
2079         void __user *argp = (void __user *)arg;
2080         long r;
2081
2082         switch (ioctl) {
2083         case KVM_ENABLE_CAP:
2084         {
2085                 struct kvm_enable_cap cap;
2086                 r = -EFAULT;
2087                 if (copy_from_user(&cap, argp, sizeof(cap)))
2088                         goto out;
2089                 vcpu_load(vcpu);
2090                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2091                 vcpu_put(vcpu);
2092                 break;
2093         }
2094
2095         case KVM_SET_ONE_REG:
2096         case KVM_GET_ONE_REG:
2097         {
2098                 struct kvm_one_reg reg;
2099                 r = -EFAULT;
2100                 if (copy_from_user(&reg, argp, sizeof(reg)))
2101                         goto out;
2102                 if (ioctl == KVM_SET_ONE_REG)
2103                         r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
2104                 else
2105                         r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
2106                 break;
2107         }
2108
2109 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
2110         case KVM_DIRTY_TLB: {
2111                 struct kvm_dirty_tlb dirty;
2112                 r = -EFAULT;
2113                 if (copy_from_user(&dirty, argp, sizeof(dirty)))
2114                         goto out;
2115                 vcpu_load(vcpu);
2116                 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
2117                 vcpu_put(vcpu);
2118                 break;
2119         }
2120 #endif
2121         default:
2122                 r = -EINVAL;
2123         }
2124
2125 out:
2126         return r;
2127 }
2128
2129 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2130 {
2131         return VM_FAULT_SIGBUS;
2132 }
2133
2134 static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
2135 {
2136         u32 inst_nop = 0x60000000;
2137 #ifdef CONFIG_KVM_BOOKE_HV
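        /* On BookE HV the hypercall instruction is "sc 1" (LEV = 1). */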
2138         u32 inst_sc1 = 0x44000022;
2139         pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
2140         pvinfo->hcall[1] = cpu_to_be32(inst_nop);
2141         pvinfo->hcall[2] = cpu_to_be32(inst_nop);
2142         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2143 #else
2144         u32 inst_lis = 0x3c000000;
2145         u32 inst_ori = 0x60000000;
2146         u32 inst_sc = 0x44000002;
2147         u32 inst_imm_mask = 0xffff;
2148
2149         /*
2150          * The hypercall to get into KVM from within guest context is as
2151          * follows:
2152          *
2153          *    lis r0, KVM_SC_MAGIC_R0@h
2154          *    ori r0, r0, KVM_SC_MAGIC_R0@l
2155          *    sc
2156          *    nop
2157          */
2158         pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
2159         pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
2160         pvinfo->hcall[2] = cpu_to_be32(inst_sc);
2161         pvinfo->hcall[3] = cpu_to_be32(inst_nop);
2162 #endif
2163
2164         pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
2165
2166         return 0;
2167 }
2168
2169 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
2170                           bool line_status)
2171 {
2172         if (!irqchip_in_kernel(kvm))
2173                 return -ENXIO;
2174
2175         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
2176                                         irq_event->irq, irq_event->level,
2177                                         line_status);
2178         return 0;
2179 }
2180
2181
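/*
 * VM-wide KVM_ENABLE_CAP handling: toggling individual hypercalls in
 * the enabled_hcalls bitmap, selecting the SMT mode, and enabling
 * nested HV, secure guests and the second DAWR where the backend
 * provides the corresponding kvm_ops hook.
 */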
2182 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
2183                             struct kvm_enable_cap *cap)
2184 {
2185         int r;
2186
2187         if (cap->flags)
2188                 return -EINVAL;
2189
2190         switch (cap->cap) {
2191 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2192         case KVM_CAP_PPC_ENABLE_HCALL: {
2193                 unsigned long hcall = cap->args[0];
2194
2195                 r = -EINVAL;
2196                 if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
2197                     cap->args[1] > 1)
2198                         break;
2199                 if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
2200                         break;
2201                 if (cap->args[1])
2202                         set_bit(hcall / 4, kvm->arch.enabled_hcalls);
2203                 else
2204                         clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
2205                 r = 0;
2206                 break;
2207         }
2208         case KVM_CAP_PPC_SMT: {
2209                 unsigned long mode = cap->args[0];
2210                 unsigned long flags = cap->args[1];
2211
2212                 r = -EINVAL;
2213                 if (kvm->arch.kvm_ops->set_smt_mode)
2214                         r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
2215                 break;
2216         }
2217
2218         case KVM_CAP_PPC_NESTED_HV:
2219                 r = -EINVAL;
2220                 if (!is_kvmppc_hv_enabled(kvm) ||
2221                     !kvm->arch.kvm_ops->enable_nested)
2222                         break;
2223                 r = kvm->arch.kvm_ops->enable_nested(kvm);
2224                 break;
2225 #endif
2226 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
2227         case KVM_CAP_PPC_SECURE_GUEST:
2228                 r = -EINVAL;
2229                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
2230                         break;
2231                 r = kvm->arch.kvm_ops->enable_svm(kvm);
2232                 break;
2233         case KVM_CAP_PPC_DAWR1:
2234                 r = -EINVAL;
2235                 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
2236                         break;
2237                 r = kvm->arch.kvm_ops->enable_dawr1(kvm);
2238                 break;
2239 #endif
2240         default:
2241                 r = -EINVAL;
2242                 break;
2243         }
2244
2245         return r;
2246 }
2247
2248 #ifdef CONFIG_PPC_BOOK3S_64
2249 /*
2250  * These functions check whether the underlying hardware is safe
2251  * against attacks based on observing the effects of speculatively
2252  * executed instructions, and whether it supplies instructions for
2253  * use in workarounds.  The information comes from firmware, either
2254  * via the device tree on powernv platforms or from an hcall on
2255  * pseries platforms.
2256  */
2257 #ifdef CONFIG_PPC_PSERIES
2258 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2259 {
2260         struct h_cpu_char_result c;
2261         unsigned long rc;
2262
2263         if (!machine_is(pseries))
2264                 return -ENOTTY;
2265
2266         rc = plpar_get_cpu_characteristics(&c);
2267         if (rc == H_SUCCESS) {
2268                 cp->character = c.character;
2269                 cp->behaviour = c.behaviour;
2270                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2271                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2272                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2273                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2274                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2275                         KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
2276                         KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
2277                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2278                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2279                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2280                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2281                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2282                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2283         }
2284         return 0;
2285 }
2286 #else
2287 static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2288 {
2289         return -ENOTTY;
2290 }
2291 #endif
2292
2293 static inline bool have_fw_feat(struct device_node *fw_features,
2294                                 const char *state, const char *name)
2295 {
2296         struct device_node *np;
2297         bool r = false;
2298
2299         np = of_get_child_by_name(fw_features, name);
2300         if (np) {
2301                 r = of_property_read_bool(np, state);
2302                 of_node_put(np);
2303         }
2304         return r;
2305 }
2306
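/*
 * Fill in the data for KVM_PPC_GET_CPU_CHAR: try the pseries
 * H_GET_CPU_CHARACTERISTICS hcall first, and otherwise (on powernv)
 * derive the same information from the ibm,opal/fw-features device
 * tree nodes.
 */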
2307 static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
2308 {
2309         struct device_node *np, *fw_features;
2310         int r;
2311
2312         memset(cp, 0, sizeof(*cp));
2313         r = pseries_get_cpu_char(cp);
2314         if (r != -ENOTTY)
2315                 return r;
2316
2317         np = of_find_node_by_name(NULL, "ibm,opal");
2318         if (np) {
2319                 fw_features = of_get_child_by_name(np, "fw-features");
2320                 of_node_put(np);
2321                 if (!fw_features)
2322                         return 0;
2323                 if (have_fw_feat(fw_features, "enabled",
2324                                  "inst-spec-barrier-ori31,31,0"))
2325                         cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
2326                 if (have_fw_feat(fw_features, "enabled",
2327                                  "fw-bcctrl-serialized"))
2328                         cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
2329                 if (have_fw_feat(fw_features, "enabled",
2330                                  "inst-l1d-flush-ori30,30,0"))
2331                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
2332                 if (have_fw_feat(fw_features, "enabled",
2333                                  "inst-l1d-flush-trig2"))
2334                         cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
2335                 if (have_fw_feat(fw_features, "enabled",
2336                                  "fw-l1d-thread-split"))
2337                         cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
2338                 if (have_fw_feat(fw_features, "enabled",
2339                                  "fw-count-cache-disabled"))
2340                         cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
2341                 if (have_fw_feat(fw_features, "enabled",
2342                                  "fw-count-cache-flush-bcctr2,0,0"))
2343                         cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2344                 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
2345                         KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
2346                         KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
2347                         KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
2348                         KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
2349                         KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
2350                         KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
2351
2352                 if (have_fw_feat(fw_features, "enabled",
2353                                  "speculation-policy-favor-security"))
2354                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
2355                 if (!have_fw_feat(fw_features, "disabled",
2356                                   "needs-l1d-flush-msr-pr-0-to-1"))
2357                         cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
2358                 if (!have_fw_feat(fw_features, "disabled",
2359                                   "needs-spec-barrier-for-bound-checks"))
2360                         cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
2361                 if (have_fw_feat(fw_features, "enabled",
2362                                  "needs-count-cache-flush-on-context-switch"))
2363                         cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2364                 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
2365                         KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
2366                         KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
2367                         KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
2368
2369                 of_node_put(fw_features);
2370         }
2371
2372         return 0;
2373 }
2374 #endif
2375
2376 long kvm_arch_vm_ioctl(struct file *filp,
2377                        unsigned int ioctl, unsigned long arg)
2378 {
2379         struct kvm *kvm __maybe_unused = filp->private_data;
2380         void __user *argp = (void __user *)arg;
2381         long r;
2382
2383         switch (ioctl) {
2384         case KVM_PPC_GET_PVINFO: {
2385                 struct kvm_ppc_pvinfo pvinfo;
2386                 memset(&pvinfo, 0, sizeof(pvinfo));
2387                 r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
2388                 if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
2389                         r = -EFAULT;
2390                         goto out;
2391                 }
2392
2393                 break;
2394         }
2395 #ifdef CONFIG_SPAPR_TCE_IOMMU
2396         case KVM_CREATE_SPAPR_TCE_64: {
2397                 struct kvm_create_spapr_tce_64 create_tce_64;
2398
2399                 r = -EFAULT;
2400                 if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
2401                         goto out;
2402                 if (create_tce_64.flags) {
2403                         r = -EINVAL;
2404                         goto out;
2405                 }
2406                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2407                 goto out;
2408         }
2409         case KVM_CREATE_SPAPR_TCE: {
2410                 struct kvm_create_spapr_tce create_tce;
2411                 struct kvm_create_spapr_tce_64 create_tce_64;
2412
2413                 r = -EFAULT;
2414                 if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
2415                         goto out;
2416
2417                 create_tce_64.liobn = create_tce.liobn;
2418                 create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
2419                 create_tce_64.offset = 0;
2420                 create_tce_64.size = create_tce.window_size >>
2421                                 IOMMU_PAGE_SHIFT_4K;
2422                 create_tce_64.flags = 0;
2423                 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
2424                 goto out;
2425         }
2426 #endif
2427 #ifdef CONFIG_PPC_BOOK3S_64
2428         case KVM_PPC_GET_SMMU_INFO: {
2429                 struct kvm_ppc_smmu_info info;
2430                 struct kvm *kvm = filp->private_data;
2431
2432                 memset(&info, 0, sizeof(info));
2433                 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
2434                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2435                         r = -EFAULT;
2436                 break;
2437         }
2438         case KVM_PPC_RTAS_DEFINE_TOKEN: {
2439                 struct kvm *kvm = filp->private_data;
2440
2441                 r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
2442                 break;
2443         }
2444         case KVM_PPC_CONFIGURE_V3_MMU: {
2445                 struct kvm *kvm = filp->private_data;
2446                 struct kvm_ppc_mmuv3_cfg cfg;
2447
2448                 r = -EINVAL;
2449                 if (!kvm->arch.kvm_ops->configure_mmu)
2450                         goto out;
2451                 r = -EFAULT;
2452                 if (copy_from_user(&cfg, argp, sizeof(cfg)))
2453                         goto out;
2454                 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
2455                 break;
2456         }
2457         case KVM_PPC_GET_RMMU_INFO: {
2458                 struct kvm *kvm = filp->private_data;
2459                 struct kvm_ppc_rmmu_info info;
2460
2461                 r = -EINVAL;
2462                 if (!kvm->arch.kvm_ops->get_rmmu_info)
2463                         goto out;
2464                 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
2465                 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
2466                         r = -EFAULT;
2467                 break;
2468         }
2469         case KVM_PPC_GET_CPU_CHAR: {
2470                 struct kvm_ppc_cpu_char cpuchar;
2471
2472                 r = kvmppc_get_cpu_char(&cpuchar);
2473                 if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
2474                         r = -EFAULT;
2475                 break;
2476         }
2477         case KVM_PPC_SVM_OFF: {
2478                 struct kvm *kvm = filp->private_data;
2479
2480                 r = 0;
2481                 if (!kvm->arch.kvm_ops->svm_off)
2482                         goto out;
2483
2484                 r = kvm->arch.kvm_ops->svm_off(kvm);
2485                 break;
2486         }
2487         default: {
2488                 struct kvm *kvm = filp->private_data;
2489                 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
2490         }
2491 #else /* CONFIG_PPC_BOOK3S_64 */
2492         default:
2493                 r = -ENOTTY;
2494 #endif
2495         }
2496 out:
2497         return r;
2498 }
2499
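/*
 * Logical partition ID (LPID) allocation.  LPID 0 is reserved for the
 * host; guest LPIDs run from 1 to nr_lpids - 1 and are handed out by
 * the IDA below.
 */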
2500 static DEFINE_IDA(lpid_inuse);
2501 static unsigned long nr_lpids;
2502
2503 long kvmppc_alloc_lpid(void)
2504 {
2505         int lpid;
2506
2507         /* The host LPID must always be 0 (allocation starts at 1) */
2508         lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
2509         if (lpid < 0) {
2510                 if (lpid == -ENOMEM)
2511                         pr_err("%s: Out of memory\n", __func__);
2512                 else
2513                         pr_err("%s: No LPIDs free\n", __func__);
2514                 return -ENOMEM;
2515         }
2516
2517         return lpid;
2518 }
2519 EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
2520
2521 void kvmppc_free_lpid(long lpid)
2522 {
2523         ida_free(&lpid_inuse, lpid);
2524 }
2525 EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
2526
2527 /* nr_lpids_param includes the host LPID */
2528 void kvmppc_init_lpid(unsigned long nr_lpids_param)
2529 {
2530         nr_lpids = nr_lpids_param;
2531 }
2532 EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
2533
2534 int kvm_arch_init(void *opaque)
2535 {
2536         return 0;
2537 }
2538
2539 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
2540
2541 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
2542 {
2543         if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
2544                 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
2545 }
2546
2547 int kvm_arch_create_vm_debugfs(struct kvm *kvm)
2548 {
2549         if (kvm->arch.kvm_ops->create_vm_debugfs)
2550                 kvm->arch.kvm_ops->create_vm_debugfs(kvm);
2551         return 0;
2552 }