// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_arch_vcpu_runnable(vcpu);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Called with interrupts
 * enabled; interrupts are hard-disabled here and stay disabled when this
 * returns 1, but are enabled again on the return-to-host path.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

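/*
 * Handle a KVM/ePAPR-style paravirtual hypercall from the guest: the
 * hypercall number arrives in GPR11, up to four arguments in GPR3..GPR6,
 * a second return value is passed back in GPR4, and the primary status
 * is this function's return value.
 */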
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

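/*
 * Basic configuration checks run before the vCPU is allowed to execute;
 * the result is cached in vcpu->arch.sane and 0 or -EINVAL is returned.
 */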
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

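/*
 * Store to a guest effective address: try the per-VM store_to_eaddr hook
 * first, then translate the address via kvmppc_xlate(), honour the
 * magic-page override, and finally fall back to kvm_write_guest() or
 * MMIO emulation.
 */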
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r = -EINVAL;

	vcpu->stat.st++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
							    size);

	if ((!r) || (r == -EAGAIN))
		return r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc = -EINVAL;

	vcpu->stat.ld++;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
							      size);

	if ((!rc) || (rc == -EAGAIN))
		return rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return kvmppc_core_check_processor_compat();
}

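/*
 * VM creation: the 'type' argument from KVM_CREATE_VM selects HV
 * (KVM_VM_PPC_HV), PR (KVM_VM_PPC_PR), or, for type 0, whichever of
 * the two backends is available, preferring HV.
 */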
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

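/*
 * KVM_CHECK_EXTENSION handler: report which optional capabilities are
 * available, based on the configured backend (HV vs. PR) and host CPU
 * features.
 */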
784aa3d7 502int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
bbf45ba5
HB
503{
504 int r;
7a58777a 505 /* Assume we're using HV mode when the HV module is loaded */
cbbc58d4 506 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
bbf45ba5 507
7a58777a
AG
508 if (kvm) {
509 /*
510 * Hooray - we know which VM type we're running on. Depend on
511 * that rather than the guess above.
512 */
513 hv_enabled = is_kvmppc_hv_enabled(kvm);
514 }
515
bbf45ba5 516 switch (ext) {
5ce941ee
SW
517#ifdef CONFIG_BOOKE
518 case KVM_CAP_PPC_BOOKE_SREGS:
f61c94bb 519 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1c810636 520 case KVM_CAP_PPC_EPR:
5ce941ee 521#else
e15a1137 522 case KVM_CAP_PPC_SEGSTATE:
1022fc3d 523 case KVM_CAP_PPC_HIOR:
930b412a 524 case KVM_CAP_PPC_PAPR:
5ce941ee 525#endif
18978768 526 case KVM_CAP_PPC_UNSET_IRQ:
7b4203e8 527 case KVM_CAP_PPC_IRQ_LEVEL:
71fbfd5f 528 case KVM_CAP_ENABLE_CAP:
e24ed81f 529 case KVM_CAP_ONE_REG:
0e673fb6 530 case KVM_CAP_IOEVENTFD:
5df554ad 531 case KVM_CAP_DEVICE_CTRL:
460df4c1 532 case KVM_CAP_IMMEDIATE_EXIT:
de56a948
PM
533 r = 1;
534 break;
de56a948 535 case KVM_CAP_PPC_PAIRED_SINGLES:
ad0a048b 536 case KVM_CAP_PPC_OSI:
15711e9c 537 case KVM_CAP_PPC_GET_PVINFO:
bf7ca4bd 538#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
dc83b8bc 539 case KVM_CAP_SW_TLB:
eb1e4f43 540#endif
699cc876 541 /* We support this only for PR */
cbbc58d4 542 r = !hv_enabled;
e15a1137 543 break;
699cc876
AK
544#ifdef CONFIG_KVM_MPIC
545 case KVM_CAP_IRQ_MPIC:
546 r = 1;
547 break;
548#endif
549
f31e65e1 550#ifdef CONFIG_PPC_BOOK3S_64
54738c09 551 case KVM_CAP_SPAPR_TCE:
58ded420 552 case KVM_CAP_SPAPR_TCE_64:
693ac10a
SJS
553 r = 1;
554 break;
121f80ba 555 case KVM_CAP_SPAPR_TCE_VFIO:
693ac10a
SJS
556 r = !!cpu_has_feature(CPU_FTR_HVMODE);
557 break;
8e591cb7 558 case KVM_CAP_PPC_RTAS:
f2e91042 559 case KVM_CAP_PPC_FIXUP_HCALL:
699a0ea0 560 case KVM_CAP_PPC_ENABLE_HCALL:
5975a2e0
PM
561#ifdef CONFIG_KVM_XICS
562 case KVM_CAP_IRQ_XICS:
563#endif
3214d01f 564 case KVM_CAP_PPC_GET_CPU_CHAR:
54738c09
DG
565 r = 1;
566 break;
eacc56bb
CLG
567#ifdef CONFIG_KVM_XIVE
568 case KVM_CAP_PPC_IRQ_XIVE:
569 /*
3fab2d10
CLG
570 * We need XIVE to be enabled on the platform (implies
571 * a POWER9 processor) and the PowerNV platform, as
572 * nested is not yet supported.
eacc56bb 573 */
3fab2d10 574 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
eacc56bb
CLG
575 break;
576#endif
a8acaece
DG
577
578 case KVM_CAP_PPC_ALLOC_HTAB:
579 r = hv_enabled;
580 break;
f31e65e1 581#endif /* CONFIG_PPC_BOOK3S_64 */
699cc876 582#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
371fefd6 583 case KVM_CAP_PPC_SMT:
45c940ba 584 r = 0;
57900694
PM
585 if (kvm) {
586 if (kvm->arch.emul_smt_mode > 1)
587 r = kvm->arch.emul_smt_mode;
588 else
589 r = kvm->arch.smt_mode;
590 } else if (hv_enabled) {
45c940ba
PM
591 if (cpu_has_feature(CPU_FTR_ARCH_300))
592 r = 1;
593 else
594 r = threads_per_subcore;
595 }
371fefd6 596 break;
2ed4f9dd
PM
597 case KVM_CAP_PPC_SMT_POSSIBLE:
598 r = 1;
599 if (hv_enabled) {
600 if (!cpu_has_feature(CPU_FTR_ARCH_300))
601 r = ((threads_per_subcore << 1) - 1);
602 else
603 /* P9 can emulate dbells, so allow any mode */
604 r = 8 | 4 | 2 | 1;
605 }
606 break;
aa04b4cc 607 case KVM_CAP_PPC_RMA:
c17b98cf 608 r = 0;
aa04b4cc 609 break;
e928e9cb
ME
610 case KVM_CAP_PPC_HWRNG:
611 r = kvmppc_hwrng_present();
612 break;
c9270132 613 case KVM_CAP_PPC_MMU_RADIX:
8cf4ecc0 614 r = !!(hv_enabled && radix_enabled());
c9270132
PM
615 break;
616 case KVM_CAP_PPC_MMU_HASH_V3:
de760db4
PM
617 r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
618 cpu_has_feature(CPU_FTR_HVMODE));
c9270132 619 break;
aa069a99
PM
620 case KVM_CAP_PPC_NESTED_HV:
621 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
622 !kvmppc_hv_ops->enable_nested(NULL));
623 break;
f4800b1f 624#endif
342d3db7 625 case KVM_CAP_SYNC_MMU:
699cc876 626#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
c17b98cf 627 r = hv_enabled;
f4800b1f
AG
628#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
629 r = 1;
630#else
631 r = 0;
a2932923 632#endif
699cc876
AK
633 break;
634#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
a2932923 635 case KVM_CAP_PPC_HTAB_FD:
cbbc58d4 636 r = hv_enabled;
a2932923 637 break;
de56a948 638#endif
b5434032
ME
639 case KVM_CAP_NR_VCPUS:
640 /*
641 * Recommending a number of CPUs is somewhat arbitrary; we
642 * return the number of present CPUs for -HV (since a host
643 * will have secondary threads "offline"), and for other KVM
644 * implementations just count online CPUs.
645 */
cbbc58d4 646 if (hv_enabled)
699cc876
AK
647 r = num_present_cpus();
648 else
649 r = num_online_cpus();
b5434032
ME
650 break;
651 case KVM_CAP_MAX_VCPUS:
652 r = KVM_MAX_VCPUS;
653 break;
a86cb413
TH
654 case KVM_CAP_MAX_VCPU_ID:
655 r = KVM_MAX_VCPU_ID;
656 break;
5b74716e
BH
657#ifdef CONFIG_PPC_BOOK3S_64
658 case KVM_CAP_PPC_GET_SMMU_INFO:
659 r = 1;
660 break;
d3695aa4
AK
661 case KVM_CAP_SPAPR_MULTITCE:
662 r = 1;
663 break;
050f2339 664 case KVM_CAP_SPAPR_RESIZE_HPT:
790a9df5 665 r = !!hv_enabled;
050f2339 666 break;
134764ed
AP
667#endif
668#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
669 case KVM_CAP_PPC_FWNMI:
670 r = hv_enabled;
671 break;
5b74716e 672#endif
4bb3c7a0 673#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
23528bb2 674 case KVM_CAP_PPC_HTM:
d234d68e
SG
675 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
676 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
23528bb2 677 break;
4bb3c7a0 678#endif
bbf45ba5
HB
679 default:
680 r = 0;
681 break;
682 }
683 return r;
684
685}
686
687long kvm_arch_dev_ioctl(struct file *filp,
688 unsigned int ioctl, unsigned long arg)
689{
690 return -EINVAL;
691}
692
5587027c 693void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
db3fe4eb
TY
694 struct kvm_memory_slot *dont)
695{
5587027c 696 kvmppc_core_free_memslot(kvm, free, dont);
db3fe4eb
TY
697}
698
5587027c
AK
699int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
700 unsigned long npages)
db3fe4eb 701{
5587027c 702 return kvmppc_core_create_memslot(kvm, slot, npages);
db3fe4eb
TY
703}
704
f7784b8e 705int kvm_arch_prepare_memory_region(struct kvm *kvm,
462fce46 706 struct kvm_memory_slot *memslot,
09170a49 707 const struct kvm_userspace_memory_region *mem,
7b6195a9 708 enum kvm_mr_change change)
bbf45ba5 709{
a66b48c3 710 return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
bbf45ba5
HB
711}
712
f7784b8e 713void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 714 const struct kvm_userspace_memory_region *mem,
8482644a 715 const struct kvm_memory_slot *old,
f36f3f28 716 const struct kvm_memory_slot *new,
8482644a 717 enum kvm_mr_change change)
f7784b8e 718{
f032b734 719 kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
f7784b8e
MT
720}
721
2df72e9b
MT
722void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
723 struct kvm_memory_slot *slot)
34d4cb8f 724{
dfe49dbd 725 kvmppc_core_flush_memslot(kvm, slot);
34d4cb8f
MT
726}
727
bbf45ba5
HB
728struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
729{
73e75b41
HB
730 struct kvm_vcpu *vcpu;
731 vcpu = kvmppc_core_vcpu_create(kvm, id);
03cdab53
ME
732 if (!IS_ERR(vcpu)) {
733 vcpu->arch.wqp = &vcpu->wq;
06056bfb 734 kvmppc_create_vcpu_debugfs(vcpu, id);
03cdab53 735 }
73e75b41 736 return vcpu;
bbf45ba5
HB
737}
738
31928aa5 739void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 740{
42897d86
MT
741}
742
bbf45ba5
HB
743void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
744{
a595405d
AG
745 /* Make sure we're not using the vcpu anymore */
746 hrtimer_cancel(&vcpu->arch.dec_timer);
a595405d 747
73e75b41 748 kvmppc_remove_vcpu_debugfs(vcpu);
eb1e4f43
SW
749
750 switch (vcpu->arch.irq_type) {
751 case KVMPPC_IRQ_MPIC:
752 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
753 break;
bc5ad3f3 754 case KVMPPC_IRQ_XICS:
03f95332 755 if (xics_on_xive())
5af50993
BH
756 kvmppc_xive_cleanup_vcpu(vcpu);
757 else
758 kvmppc_xics_free_icp(vcpu);
bc5ad3f3 759 break;
eacc56bb
CLG
760 case KVMPPC_IRQ_XIVE:
761 kvmppc_xive_native_cleanup_vcpu(vcpu);
762 break;
eb1e4f43
SW
763 }
764
db93f574 765 kvmppc_core_vcpu_free(vcpu);
bbf45ba5
HB
766}
767
768void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
769{
770 kvm_arch_vcpu_free(vcpu);
771}
772
773int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
774{
9dd921cf 775 return kvmppc_core_pending_dec(vcpu);
bbf45ba5
HB
776}
777
5358a963 778static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
544c6761
AG
779{
780 struct kvm_vcpu *vcpu;
781
782 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
d02d4d15 783 kvmppc_decrementer_func(vcpu);
544c6761
AG
784
785 return HRTIMER_NORESTART;
786}
787
bbf45ba5
HB
788int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
789{
f61c94bb
BB
790 int ret;
791
544c6761 792 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
544c6761 793 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
5855564c 794 vcpu->arch.dec_expires = get_tb();
bbf45ba5 795
09000adb
BB
796#ifdef CONFIG_KVM_EXIT_TIMING
797 mutex_init(&vcpu->arch.exit_timing_lock);
798#endif
f61c94bb
BB
799 ret = kvmppc_subarch_vcpu_init(vcpu);
800 return ret;
bbf45ba5
HB
801}
802
803void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
804{
ecc0981f 805 kvmppc_mmu_destroy(vcpu);
f61c94bb 806 kvmppc_subarch_vcpu_uninit(vcpu);
bbf45ba5
HB
807}
808
809void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
810{
eab17672
SW
811#ifdef CONFIG_BOOKE
812 /*
813 * vrsave (formerly usprg0) isn't used by Linux, but may
814 * be used by the guest.
815 *
816 * On non-booke this is associated with Altivec and
817 * is handled by code in book3s.c.
818 */
819 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
820#endif
9dd921cf 821 kvmppc_core_vcpu_load(vcpu, cpu);
bbf45ba5
HB
822}
823
824void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
825{
9dd921cf 826 kvmppc_core_vcpu_put(vcpu);
eab17672
SW
827#ifdef CONFIG_BOOKE
828 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
829#endif
bbf45ba5
HB
830}
831
9576730d
SW
832/*
833 * irq_bypass_add_producer and irq_bypass_del_producer are only
834 * useful if the architecture supports PCI passthrough.
835 * irq_bypass_stop and irq_bypass_start are not needed and so
836 * kvm_ops are not defined for them.
837 */
838bool kvm_arch_has_irq_bypass(void)
839{
840 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
841 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
842}
843
844int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
845 struct irq_bypass_producer *prod)
846{
847 struct kvm_kernel_irqfd *irqfd =
848 container_of(cons, struct kvm_kernel_irqfd, consumer);
849 struct kvm *kvm = irqfd->kvm;
850
851 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
852 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
853
854 return 0;
855}
856
857void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
858 struct irq_bypass_producer *prod)
859{
860 struct kvm_kernel_irqfd *irqfd =
861 container_of(cons, struct kvm_kernel_irqfd, consumer);
862 struct kvm *kvm = irqfd->kvm;
863
864 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
865 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
866}
867
6f63e81b
BL
868#ifdef CONFIG_VSX
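/*
 * Map a logical doubleword/word index within a VSX register to the
 * host's in-memory element order, which differs between big- and
 * little-endian builds.
 */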
869static inline int kvmppc_get_vsr_dword_offset(int index)
870{
871 int offset;
872
873 if ((index != 0) && (index != 1))
874 return -1;
875
876#ifdef __BIG_ENDIAN
877 offset = index;
878#else
879 offset = 1 - index;
880#endif
881
882 return offset;
883}
884
885static inline int kvmppc_get_vsr_word_offset(int index)
886{
887 int offset;
888
889 if ((index > 3) || (index < 0))
890 return -1;
891
892#ifdef __BIG_ENDIAN
893 offset = index;
894#else
895 offset = 3 - index;
896#endif
897 return offset;
898}
899
900static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
901 u64 gpr)
902{
903 union kvmppc_one_reg val;
904 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
905 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
906
907 if (offset == -1)
908 return;
909
4eeb8556
SG
910 if (index >= 32) {
911 val.vval = VCPU_VSX_VR(vcpu, index - 32);
6f63e81b 912 val.vsxval[offset] = gpr;
4eeb8556 913 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
6f63e81b
BL
914 } else {
915 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
916 }
917}
918
919static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
920 u64 gpr)
921{
922 union kvmppc_one_reg val;
923 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
924
4eeb8556
SG
925 if (index >= 32) {
926 val.vval = VCPU_VSX_VR(vcpu, index - 32);
6f63e81b
BL
927 val.vsxval[0] = gpr;
928 val.vsxval[1] = gpr;
4eeb8556 929 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
6f63e81b
BL
930 } else {
931 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
932 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
933 }
934}
935
94dd7fa1
SG
936static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
937 u32 gpr)
938{
939 union kvmppc_one_reg val;
940 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
941
4eeb8556 942 if (index >= 32) {
94dd7fa1
SG
943 val.vsx32val[0] = gpr;
944 val.vsx32val[1] = gpr;
945 val.vsx32val[2] = gpr;
946 val.vsx32val[3] = gpr;
4eeb8556 947 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
94dd7fa1
SG
948 } else {
949 val.vsx32val[0] = gpr;
950 val.vsx32val[1] = gpr;
951 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
952 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
953 }
954}
955
6f63e81b
BL
956static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
957 u32 gpr32)
958{
959 union kvmppc_one_reg val;
960 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
961 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
962 int dword_offset, word_offset;
963
964 if (offset == -1)
965 return;
966
4eeb8556
SG
967 if (index >= 32) {
968 val.vval = VCPU_VSX_VR(vcpu, index - 32);
6f63e81b 969 val.vsx32val[offset] = gpr32;
4eeb8556 970 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
6f63e81b
BL
971 } else {
972 dword_offset = offset / 2;
973 word_offset = offset % 2;
974 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
975 val.vsx32val[word_offset] = gpr32;
976 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
977 }
978}
979#endif /* CONFIG_VSX */
980
09f98496 981#ifdef CONFIG_ALTIVEC
acc9eb93
SG
982static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
983 int index, int element_size)
984{
985 int offset;
986 int elts = sizeof(vector128)/element_size;
987
988 if ((index < 0) || (index >= elts))
989 return -1;
990
991 if (kvmppc_need_byteswap(vcpu))
992 offset = elts - index - 1;
993 else
994 offset = index;
995
996 return offset;
997}
998
999static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1000 int index)
1001{
1002 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1003}
1004
1005static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1006 int index)
1007{
1008 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1009}
1010
1011static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1012 int index)
1013{
1014 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1015}
1016
1017static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1018 int index)
1019{
1020 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1021}
1022
1023
09f98496 1024static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
acc9eb93 1025 u64 gpr)
09f98496 1026{
acc9eb93
SG
1027 union kvmppc_one_reg val;
1028 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1029 vcpu->arch.mmio_vmx_offset);
09f98496 1030 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
09f98496 1031
acc9eb93
SG
1032 if (offset == -1)
1033 return;
1034
1035 val.vval = VCPU_VSX_VR(vcpu, index);
1036 val.vsxval[offset] = gpr;
1037 VCPU_VSX_VR(vcpu, index) = val.vval;
1038}
1039
1040static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1041 u32 gpr32)
1042{
1043 union kvmppc_one_reg val;
1044 int offset = kvmppc_get_vmx_word_offset(vcpu,
1045 vcpu->arch.mmio_vmx_offset);
1046 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
09f98496 1047
acc9eb93 1048 if (offset == -1)
09f98496
JRZ
1049 return;
1050
acc9eb93
SG
1051 val.vval = VCPU_VSX_VR(vcpu, index);
1052 val.vsx32val[offset] = gpr32;
1053 VCPU_VSX_VR(vcpu, index) = val.vval;
1054}
09f98496 1055
acc9eb93
SG
1056static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1057 u16 gpr16)
1058{
1059 union kvmppc_one_reg val;
1060 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1061 vcpu->arch.mmio_vmx_offset);
1062 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1063
1064 if (offset == -1)
09f98496
JRZ
1065 return;
1066
acc9eb93
SG
1067 val.vval = VCPU_VSX_VR(vcpu, index);
1068 val.vsx16val[offset] = gpr16;
1069 VCPU_VSX_VR(vcpu, index) = val.vval;
1070}
1071
1072static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1073 u8 gpr8)
1074{
1075 union kvmppc_one_reg val;
1076 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1077 vcpu->arch.mmio_vmx_offset);
1078 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
09f98496 1079
acc9eb93
SG
1080 if (offset == -1)
1081 return;
09f98496 1082
acc9eb93
SG
1083 val.vval = VCPU_VSX_VR(vcpu, index);
1084 val.vsx8val[offset] = gpr8;
1085 VCPU_VSX_VR(vcpu, index) = val.vval;
09f98496
JRZ
1086}
1087#endif /* CONFIG_ALTIVEC */
1088
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

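/*
 * Complete a previously started MMIO load: pick the value out of
 * run->mmio.data, apply any byte swapping, single-to-double float
 * conversion and sign extension that was requested, and write it to
 * the destination register described by vcpu->arch.io_gpr.
 */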
bbf45ba5
HB
1119static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
1120 struct kvm_run *run)
1121{
69b61833 1122 u64 uninitialized_var(gpr);
bbf45ba5 1123
8e5b26b5 1124 if (run->mmio.len > sizeof(gpr)) {
bbf45ba5
HB
1125 printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
1126 return;
1127 }
1128
d078eed3 1129 if (!vcpu->arch.mmio_host_swabbed) {
bbf45ba5 1130 switch (run->mmio.len) {
b104d066 1131 case 8: gpr = *(u64 *)run->mmio.data; break;
8e5b26b5
AG
1132 case 4: gpr = *(u32 *)run->mmio.data; break;
1133 case 2: gpr = *(u16 *)run->mmio.data; break;
1134 case 1: gpr = *(u8 *)run->mmio.data; break;
bbf45ba5
HB
1135 }
1136 } else {
bbf45ba5 1137 switch (run->mmio.len) {
d078eed3
DG
1138 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1139 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1140 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
8e5b26b5 1141 case 1: gpr = *(u8 *)run->mmio.data; break;
bbf45ba5
HB
1142 }
1143 }
8e5b26b5 1144
6f63e81b
BL
1145 /* conversion between single and double precision */
1146 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1147 gpr = sp_to_dp(gpr);
1148
3587d534
AG
1149 if (vcpu->arch.mmio_sign_extend) {
1150 switch (run->mmio.len) {
1151#ifdef CONFIG_PPC64
1152 case 4:
1153 gpr = (s64)(s32)gpr;
1154 break;
1155#endif
1156 case 2:
1157 gpr = (s64)(s16)gpr;
1158 break;
1159 case 1:
1160 gpr = (s64)(s8)gpr;
1161 break;
1162 }
1163 }
1164
b3c5d3c2
AG
1165 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1166 case KVM_MMIO_REG_GPR:
b104d066
AG
1167 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1168 break;
b3c5d3c2 1169 case KVM_MMIO_REG_FPR:
2e6baa46
SG
1170 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1171 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1172
efff1912 1173 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
b104d066 1174 break;
287d5611 1175#ifdef CONFIG_PPC_BOOK3S
b3c5d3c2
AG
1176 case KVM_MMIO_REG_QPR:
1177 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
b104d066 1178 break;
b3c5d3c2 1179 case KVM_MMIO_REG_FQPR:
efff1912 1180 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
b3c5d3c2 1181 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
b104d066 1182 break;
6f63e81b
BL
1183#endif
1184#ifdef CONFIG_VSX
1185 case KVM_MMIO_REG_VSX:
2e6baa46
SG
1186 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1187 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1188
da2a32b8 1189 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
6f63e81b 1190 kvmppc_set_vsr_dword(vcpu, gpr);
da2a32b8 1191 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
6f63e81b 1192 kvmppc_set_vsr_word(vcpu, gpr);
da2a32b8 1193 else if (vcpu->arch.mmio_copy_type ==
6f63e81b
BL
1194 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1195 kvmppc_set_vsr_dword_dump(vcpu, gpr);
da2a32b8 1196 else if (vcpu->arch.mmio_copy_type ==
94dd7fa1
SG
1197 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1198 kvmppc_set_vsr_word_dump(vcpu, gpr);
6f63e81b 1199 break;
09f98496
JRZ
1200#endif
1201#ifdef CONFIG_ALTIVEC
1202 case KVM_MMIO_REG_VMX:
2e6baa46
SG
1203 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1204 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1205
acc9eb93
SG
1206 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1207 kvmppc_set_vmx_dword(vcpu, gpr);
1208 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1209 kvmppc_set_vmx_word(vcpu, gpr);
1210 else if (vcpu->arch.mmio_copy_type ==
1211 KVMPPC_VMX_COPY_HWORD)
1212 kvmppc_set_vmx_hword(vcpu, gpr);
1213 else if (vcpu->arch.mmio_copy_type ==
1214 KVMPPC_VMX_COPY_BYTE)
1215 kvmppc_set_vmx_byte(vcpu, gpr);
09f98496 1216 break;
873db2cd
SJS
1217#endif
1218#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1219 case KVM_MMIO_REG_NESTED_GPR:
1220 if (kvmppc_need_byteswap(vcpu))
1221 gpr = swab64(gpr);
1222 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1223 sizeof(gpr));
1224 break;
287d5611 1225#endif
b104d066
AG
1226 default:
1227 BUG();
1228 }
bbf45ba5
HB
1229}
1230
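/*
 * Common MMIO load path: describe the access in run->mmio, try the
 * in-kernel MMIO bus first and complete the load immediately if a device
 * claims it; otherwise return EMULATE_DO_MMIO so the caller exits to
 * userspace with KVM_EXIT_MMIO.
 */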
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

6f63e81b
BL
1292#ifdef CONFIG_VSX
1293int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1294 unsigned int rt, unsigned int bytes,
1295 int is_default_endian, int mmio_sign_extend)
1296{
1297 enum emulation_result emulated = EMULATE_DONE;
1298
	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;
6f63e81b
BL
1302
1303 while (vcpu->arch.mmio_vsx_copy_nums) {
1304 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
1305 is_default_endian, mmio_sign_extend);
1306
1307 if (emulated != EMULATE_DONE)
1308 break;
1309
1310 vcpu->arch.paddr_accessed += run->mmio.len;
1311
1312 vcpu->arch.mmio_vsx_copy_nums--;
1313 vcpu->arch.mmio_vsx_offset++;
1314 }
1315 return emulated;
1316}
1317#endif /* CONFIG_VSX */
1318
bbf45ba5 1319int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
73601775 1320 u64 val, unsigned int bytes, int is_default_endian)
bbf45ba5
HB
1321{
1322 void *data = run->mmio.data;
ed840ee9 1323 int idx, ret;
d078eed3 1324 bool host_swabbed;
73601775 1325
d078eed3 1326 /* Pity C doesn't have a logical XOR operator */
73601775 1327 if (kvmppc_need_byteswap(vcpu)) {
d078eed3 1328 host_swabbed = is_default_endian;
73601775 1329 } else {
d078eed3 1330 host_swabbed = !is_default_endian;
73601775 1331 }
bbf45ba5
HB
1332
1333 if (bytes > sizeof(run->mmio.data)) {
1334 printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
1335 run->mmio.len);
1336 }
1337
1338 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1339 run->mmio.len = bytes;
1340 run->mmio.is_write = 1;
1341 vcpu->mmio_needed = 1;
1342 vcpu->mmio_is_write = 1;
1343
6f63e81b
BL
1344 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1345 val = dp_to_sp(val);
1346
bbf45ba5 1347 /* Store the value at the lowest bytes in 'data'. */
d078eed3 1348 if (!host_swabbed) {
bbf45ba5 1349 switch (bytes) {
b104d066 1350 case 8: *(u64 *)data = val; break;
bbf45ba5
HB
1351 case 4: *(u32 *)data = val; break;
1352 case 2: *(u16 *)data = val; break;
1353 case 1: *(u8 *)data = val; break;
1354 }
1355 } else {
bbf45ba5 1356 switch (bytes) {
d078eed3
DG
1357 case 8: *(u64 *)data = swab64(val); break;
1358 case 4: *(u32 *)data = swab32(val); break;
1359 case 2: *(u16 *)data = swab16(val); break;
1360 case 1: *(u8 *)data = val; break;
bbf45ba5
HB
1361 }
1362 }
1363
ed840ee9
SW
1364 idx = srcu_read_lock(&vcpu->kvm->srcu);
1365
e32edf4f 1366 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
ed840ee9
SW
1367 bytes, &run->mmio.data);
1368
1369 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1370
1371 if (!ret) {
0e673fb6
AG
1372 vcpu->mmio_needed = 0;
1373 return EMULATE_DONE;
1374 }
1375
bbf45ba5
HB
1376 return EMULATE_DO_MMIO;
1377}
2ba9f0d8 1378EXPORT_SYMBOL_GPL(kvmppc_handle_store);
bbf45ba5 1379
6f63e81b
BL
1380#ifdef CONFIG_VSX
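/*
 * Fetch the doubleword or word to be stored from the source VSX register,
 * taking the current copy type and mmio_vsx_offset into account.
 */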
1381static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1382{
1383 u32 dword_offset, word_offset;
1384 union kvmppc_one_reg reg;
1385 int vsx_offset = 0;
da2a32b8 1386 int copy_type = vcpu->arch.mmio_copy_type;
6f63e81b
BL
1387 int result = 0;
1388
1389 switch (copy_type) {
1390 case KVMPPC_VSX_COPY_DWORD:
1391 vsx_offset =
1392 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1393
1394 if (vsx_offset == -1) {
1395 result = -1;
1396 break;
1397 }
1398
4eeb8556 1399 if (rs < 32) {
6f63e81b
BL
1400 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1401 } else {
4eeb8556 1402 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
6f63e81b
BL
1403 *val = reg.vsxval[vsx_offset];
1404 }
1405 break;
1406
1407 case KVMPPC_VSX_COPY_WORD:
1408 vsx_offset =
1409 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1410
1411 if (vsx_offset == -1) {
1412 result = -1;
1413 break;
1414 }
1415
4eeb8556 1416 if (rs < 32) {
6f63e81b
BL
1417 dword_offset = vsx_offset / 2;
1418 word_offset = vsx_offset % 2;
1419 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1420 *val = reg.vsx32val[word_offset];
1421 } else {
4eeb8556 1422 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
6f63e81b
BL
1423 *val = reg.vsx32val[vsx_offset];
1424 }
1425 break;
1426
1427 default:
1428 result = -1;
1429 break;
1430 }
1431
1432 return result;
1433}
1434
1435int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1436 int rs, unsigned int bytes, int is_default_endian)
1437{
1438 u64 val;
1439 enum emulation_result emulated = EMULATE_DONE;
1440
1441 vcpu->arch.io_gpr = rs;
1442
	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;
6f63e81b
BL
1446
1447 while (vcpu->arch.mmio_vsx_copy_nums) {
1448 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1449 return EMULATE_FAIL;
1450
1451 emulated = kvmppc_handle_store(run, vcpu,
1452 val, bytes, is_default_endian);
1453
1454 if (emulated != EMULATE_DONE)
1455 break;
1456
1457 vcpu->arch.paddr_accessed += run->mmio.len;
1458
1459 vcpu->arch.mmio_vsx_copy_nums--;
1460 vcpu->arch.mmio_vsx_offset++;
1461 }
1462
1463 return emulated;
1464}
1465
1466static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
1467 struct kvm_run *run)
1468{
1469 enum emulation_result emulated = EMULATE_FAIL;
1470 int r;
1471
1472 vcpu->arch.paddr_accessed += run->mmio.len;
1473
1474 if (!vcpu->mmio_is_write) {
1475 emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
1476 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1477 } else {
1478 emulated = kvmppc_handle_vsx_store(run, vcpu,
1479 vcpu->arch.io_gpr, run->mmio.len, 1);
1480 }
1481
1482 switch (emulated) {
1483 case EMULATE_DO_MMIO:
1484 run->exit_reason = KVM_EXIT_MMIO;
1485 r = RESUME_HOST;
1486 break;
1487 case EMULATE_FAIL:
1488 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1489 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1490 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1491 r = RESUME_HOST;
1492 break;
1493 default:
1494 r = RESUME_GUEST;
1495 break;
1496 }
1497 return r;
1498}
1499#endif /* CONFIG_VSX */
1500
09f98496 1501#ifdef CONFIG_ALTIVEC
acc9eb93
SG
1502int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
1503 unsigned int rt, unsigned int bytes, int is_default_endian)
09f98496 1504{
6df3877f 1505 enum emulation_result emulated = EMULATE_DONE;
09f98496 1506
acc9eb93
SG
1507 if (vcpu->arch.mmio_vsx_copy_nums > 2)
1508 return EMULATE_FAIL;
1509
09f98496 1510 while (vcpu->arch.mmio_vmx_copy_nums) {
acc9eb93 1511 emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
09f98496
JRZ
1512 is_default_endian, 0);
1513
1514 if (emulated != EMULATE_DONE)
1515 break;
1516
1517 vcpu->arch.paddr_accessed += run->mmio.len;
1518 vcpu->arch.mmio_vmx_copy_nums--;
acc9eb93 1519 vcpu->arch.mmio_vmx_offset++;
09f98496
JRZ
1520 }
1521
1522 return emulated;
1523}
1524
acc9eb93 1525int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
09f98496 1526{
acc9eb93
SG
1527 union kvmppc_one_reg reg;
1528 int vmx_offset = 0;
1529 int result = 0;
09f98496 1530
acc9eb93
SG
1531 vmx_offset =
1532 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
09f98496 1533
acc9eb93 1534 if (vmx_offset == -1)
09f98496
JRZ
1535 return -1;
1536
acc9eb93
SG
1537 reg.vval = VCPU_VSX_VR(vcpu, index);
1538 *val = reg.vsxval[vmx_offset];
09f98496 1539
acc9eb93
SG
1540 return result;
1541}
09f98496 1542
acc9eb93
SG
1543int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1544{
1545 union kvmppc_one_reg reg;
1546 int vmx_offset = 0;
1547 int result = 0;
1548
1549 vmx_offset =
1550 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1551
1552 if (vmx_offset == -1)
1553 return -1;
1554
1555 reg.vval = VCPU_VSX_VR(vcpu, index);
1556 *val = reg.vsx32val[vmx_offset];
1557
1558 return result;
1559}
1560
1561int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1562{
1563 union kvmppc_one_reg reg;
1564 int vmx_offset = 0;
1565 int result = 0;
1566
1567 vmx_offset =
1568 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1569
1570 if (vmx_offset == -1)
1571 return -1;
1572
1573 reg.vval = VCPU_VSX_VR(vcpu, index);
1574 *val = reg.vsx16val[vmx_offset];
1575
1576 return result;
09f98496
JRZ
1577}
1578
acc9eb93
SG
1579int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1580{
1581 union kvmppc_one_reg reg;
1582 int vmx_offset = 0;
1583 int result = 0;
1584
1585 vmx_offset =
1586 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1587
1588 if (vmx_offset == -1)
1589 return -1;
1590
1591 reg.vval = VCPU_VSX_VR(vcpu, index);
1592 *val = reg.vsx8val[vmx_offset];
1593
1594 return result;
09f98496
JRZ
1595}
1596
acc9eb93
SG
1597int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
1598 unsigned int rs, unsigned int bytes, int is_default_endian)
09f98496
JRZ
1599{
1600 u64 val = 0;
acc9eb93 1601 unsigned int index = rs & KVM_MMIO_REG_MASK;
09f98496
JRZ
1602 enum emulation_result emulated = EMULATE_DONE;
1603
acc9eb93
SG
1604 if (vcpu->arch.mmio_vsx_copy_nums > 2)
1605 return EMULATE_FAIL;
1606
09f98496
JRZ
1607 vcpu->arch.io_gpr = rs;
1608
1609 while (vcpu->arch.mmio_vmx_copy_nums) {
acc9eb93
SG
1610 switch (vcpu->arch.mmio_copy_type) {
1611 case KVMPPC_VMX_COPY_DWORD:
1612 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1613 return EMULATE_FAIL;
1614
1615 break;
1616 case KVMPPC_VMX_COPY_WORD:
1617 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1618 return EMULATE_FAIL;
1619 break;
1620 case KVMPPC_VMX_COPY_HWORD:
1621 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1622 return EMULATE_FAIL;
1623 break;
1624 case KVMPPC_VMX_COPY_BYTE:
1625 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1626 return EMULATE_FAIL;
1627 break;
1628 default:
09f98496 1629 return EMULATE_FAIL;
acc9eb93 1630 }
09f98496 1631
acc9eb93 1632 emulated = kvmppc_handle_store(run, vcpu, val, bytes,
09f98496
JRZ
1633 is_default_endian);
1634 if (emulated != EMULATE_DONE)
1635 break;
1636
1637 vcpu->arch.paddr_accessed += run->mmio.len;
1638 vcpu->arch.mmio_vmx_copy_nums--;
acc9eb93 1639 vcpu->arch.mmio_vmx_offset++;
09f98496
JRZ
1640 }
1641
1642 return emulated;
1643}
1644
1645static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
1646 struct kvm_run *run)
1647{
1648 enum emulation_result emulated = EMULATE_FAIL;
1649 int r;
1650
1651 vcpu->arch.paddr_accessed += run->mmio.len;
1652
1653 if (!vcpu->mmio_is_write) {
acc9eb93
SG
1654 emulated = kvmppc_handle_vmx_load(run, vcpu,
1655 vcpu->arch.io_gpr, run->mmio.len, 1);
09f98496 1656 } else {
acc9eb93
SG
1657 emulated = kvmppc_handle_vmx_store(run, vcpu,
1658 vcpu->arch.io_gpr, run->mmio.len, 1);
09f98496
JRZ
1659 }
1660
1661 switch (emulated) {
1662 case EMULATE_DO_MMIO:
1663 run->exit_reason = KVM_EXIT_MMIO;
1664 r = RESUME_HOST;
1665 break;
1666 case EMULATE_FAIL:
1667 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1668 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1669 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1670 r = RESUME_HOST;
1671 break;
1672 default:
1673 r = RESUME_GUEST;
1674 break;
1675 }
1676 return r;
1677}
1678#endif /* CONFIG_ALTIVEC */
1679
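/*
 * KVM_GET_ONE_REG: let kvmppc_get_one_reg() handle the register first;
 * only if it returns -EINVAL fall back to the generic Altivec registers
 * handled here, then copy the value out to userspace.
 */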
8a41ea53
MC
1680int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1681{
1682 int r = 0;
1683 union kvmppc_one_reg val;
1684 int size;
1685
1686 size = one_reg_size(reg->id);
1687 if (size > sizeof(val))
1688 return -EINVAL;
1689
1690 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1691 if (r == -EINVAL) {
1692 r = 0;
1693 switch (reg->id) {
3840edc8
MC
1694#ifdef CONFIG_ALTIVEC
1695 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1696 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1697 r = -ENXIO;
1698 break;
1699 }
b4d7f161 1700 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
3840edc8
MC
1701 break;
1702 case KVM_REG_PPC_VSCR:
1703 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1704 r = -ENXIO;
1705 break;
1706 }
b4d7f161 1707 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
3840edc8
MC
1708 break;
1709 case KVM_REG_PPC_VRSAVE:
b4d7f161 1710 val = get_reg_val(reg->id, vcpu->arch.vrsave);
3840edc8
MC
1711 break;
1712#endif /* CONFIG_ALTIVEC */
8a41ea53
MC
1713 default:
1714 r = -EINVAL;
1715 break;
1716 }
1717 }
1718
1719 if (r)
1720 return r;
1721
1722 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1723 r = -EFAULT;
1724
1725 return r;
1726}
1727
1728int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1729{
1730 int r;
1731 union kvmppc_one_reg val;
1732 int size;
1733
1734 size = one_reg_size(reg->id);
1735 if (size > sizeof(val))
1736 return -EINVAL;
1737
1738 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1739 return -EFAULT;
1740
1741 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1742 if (r == -EINVAL) {
1743 r = 0;
1744 switch (reg->id) {
3840edc8
MC
1745#ifdef CONFIG_ALTIVEC
1746 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1747 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1748 r = -ENXIO;
1749 break;
1750 }
b4d7f161 1751 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
3840edc8
MC
1752 break;
1753 case KVM_REG_PPC_VSCR:
1754 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1755 r = -ENXIO;
1756 break;
1757 }
b4d7f161 1758 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
3840edc8
MC
1759 break;
1760 case KVM_REG_PPC_VRSAVE:
b4d7f161
GK
1761 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1762 r = -ENXIO;
1763 break;
1764 }
1765 vcpu->arch.vrsave = set_reg_val(reg->id, val);
3840edc8
MC
1766 break;
1767#endif /* CONFIG_ALTIVEC */
8a41ea53
MC
1768 default:
1769 r = -EINVAL;
1770 break;
1771 }
1772 }
1773
1774 return r;
1775}
1776
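/*
 * KVM_RUN: before re-entering the guest, finish off whatever caused the
 * previous exit (pending MMIO loads, including VSX/VMX multi-copy
 * accesses, OSI and PAPR hypercall results, or a pending EPR), then hand
 * over to kvmppc_vcpu_run() unless an immediate exit was requested.
 */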
bbf45ba5
HB
1777int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
1778{
1779 int r;
bbf45ba5 1780
accb757d
CD
1781 vcpu_load(vcpu);
1782
bbf45ba5 1783 if (vcpu->mmio_needed) {
6f63e81b 1784 vcpu->mmio_needed = 0;
bbf45ba5
HB
1785 if (!vcpu->mmio_is_write)
1786 kvmppc_complete_mmio_load(vcpu, run);
6f63e81b
BL
1787#ifdef CONFIG_VSX
1788 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1789 vcpu->arch.mmio_vsx_copy_nums--;
1790 vcpu->arch.mmio_vsx_offset++;
1791 }
1792
1793 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1794 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
1795 if (r == RESUME_HOST) {
1796 vcpu->mmio_needed = 1;
accb757d 1797 goto out;
6f63e81b
BL
1798 }
1799 }
09f98496
JRZ
1800#endif
1801#ifdef CONFIG_ALTIVEC
acc9eb93 1802 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
09f98496 1803 vcpu->arch.mmio_vmx_copy_nums--;
acc9eb93
SG
1804 vcpu->arch.mmio_vmx_offset++;
1805 }
09f98496
JRZ
1806
1807 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1808 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
1809 if (r == RESUME_HOST) {
1810 vcpu->mmio_needed = 1;
1ab03c07 1811 goto out;
09f98496
JRZ
1812 }
1813 }
6f63e81b 1814#endif
ad0a048b
AG
1815 } else if (vcpu->arch.osi_needed) {
1816 u64 *gprs = run->osi.gprs;
1817 int i;
1818
1819 for (i = 0; i < 32; i++)
1820 kvmppc_set_gpr(vcpu, i, gprs[i]);
1821 vcpu->arch.osi_needed = 0;
de56a948
PM
1822 } else if (vcpu->arch.hcall_needed) {
1823 int i;
1824
1825 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1826 for (i = 0; i < 9; ++i)
1827 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1828 vcpu->arch.hcall_needed = 0;
1c810636
AG
1829#ifdef CONFIG_BOOKE
1830 } else if (vcpu->arch.epr_needed) {
1831 kvmppc_set_epr(vcpu, run->epr.epr);
1832 vcpu->arch.epr_needed = 0;
1833#endif
bbf45ba5
HB
1834 }
1835
20b7035c 1836 kvm_sigset_activate(vcpu);
6f63e81b 1837
460df4c1
PB
1838 if (run->immediate_exit)
1839 r = -EINTR;
1840 else
1841 r = kvmppc_vcpu_run(run, vcpu);
bbf45ba5 1842
20b7035c 1843 kvm_sigset_deactivate(vcpu);
bbf45ba5 1844
c662f773 1845#ifdef CONFIG_ALTIVEC
accb757d 1846out:
c662f773 1847#endif
accb757d 1848 vcpu_put(vcpu);
bbf45ba5
HB
1849 return r;
1850}
1851
1852int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1853{
19ccb76a 1854 if (irq->irq == KVM_INTERRUPT_UNSET) {
4fe27d2a 1855 kvmppc_core_dequeue_external(vcpu);
19ccb76a
PM
1856 return 0;
1857 }
1858
1859 kvmppc_core_queue_external(vcpu, irq);
b6d33834 1860
dfd4d47e 1861 kvm_vcpu_kick(vcpu);
45c5eb67 1862
bbf45ba5
HB
1863 return 0;
1864}
1865
71fbfd5f
AG
1866static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1867 struct kvm_enable_cap *cap)
1868{
1869 int r;
1870
1871 if (cap->flags)
1872 return -EINVAL;
1873
1874 switch (cap->cap) {
ad0a048b
AG
1875 case KVM_CAP_PPC_OSI:
1876 r = 0;
1877 vcpu->arch.osi_enabled = true;
1878 break;
930b412a
AG
1879 case KVM_CAP_PPC_PAPR:
1880 r = 0;
1881 vcpu->arch.papr_enabled = true;
1882 break;
1c810636
AG
1883 case KVM_CAP_PPC_EPR:
1884 r = 0;
5df554ad
SW
1885 if (cap->args[0])
1886 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1887 else
1888 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1c810636 1889 break;
f61c94bb
BB
1890#ifdef CONFIG_BOOKE
1891 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1892 r = 0;
1893 vcpu->arch.watchdog_enabled = true;
1894 break;
1895#endif
bf7ca4bd 1896#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
dc83b8bc
SW
1897 case KVM_CAP_SW_TLB: {
1898 struct kvm_config_tlb cfg;
1899 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1900
1901 r = -EFAULT;
1902 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1903 break;
1904
1905 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1906 break;
eb1e4f43
SW
1907 }
1908#endif
1909#ifdef CONFIG_KVM_MPIC
1910 case KVM_CAP_IRQ_MPIC: {
70abaded 1911 struct fd f;
eb1e4f43
SW
1912 struct kvm_device *dev;
1913
1914 r = -EBADF;
70abaded
AV
1915 f = fdget(cap->args[0]);
1916 if (!f.file)
eb1e4f43
SW
1917 break;
1918
1919 r = -EPERM;
70abaded 1920 dev = kvm_device_from_filp(f.file);
eb1e4f43
SW
1921 if (dev)
1922 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1923
70abaded 1924 fdput(f);
eb1e4f43 1925 break;
dc83b8bc
SW
1926 }
1927#endif
5975a2e0
PM
1928#ifdef CONFIG_KVM_XICS
1929 case KVM_CAP_IRQ_XICS: {
70abaded 1930 struct fd f;
5975a2e0
PM
1931 struct kvm_device *dev;
1932
1933 r = -EBADF;
70abaded
AV
1934 f = fdget(cap->args[0]);
1935 if (!f.file)
5975a2e0
PM
1936 break;
1937
1938 r = -EPERM;
70abaded 1939 dev = kvm_device_from_filp(f.file);
5af50993 1940 if (dev) {
03f95332 1941 if (xics_on_xive())
5af50993
BH
1942 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1943 else
1944 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1945 }
5975a2e0 1946
70abaded 1947 fdput(f);
5975a2e0
PM
1948 break;
1949 }
1950#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_XIVE
	case KVM_CAP_PPC_IRQ_XIVE: {
		struct fd f;
		struct kvm_device *dev;

		/* Check for XIVE before taking a reference on the device fd,
		 * so the early exit does not leak the fd reference. */
		r = -ENXIO;
		if (!xive_enabled())
			break;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
							    cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XIVE */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
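
/*
 * Illustrative only (not part of this file): how userspace reaches
 * kvm_vcpu_ioctl_enable_cap() above.  Assumes the same <sys/ioctl.h> and
 * <linux/kvm.h> headers as the earlier sketch; KVM_CAP_PPC_OSI is used
 * here merely as an example of a flag-style capability with no arguments.
 */
static int enable_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_OSI };

	/* flags and args stay zero; the handler rejects non-zero flags. */
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}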

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		/* Copy before vcpu_load() so the error path needs no vcpu_put(). */
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		/* Copy before vcpu_load() so the error path needs no vcpu_put(). */
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
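
/*
 * Illustrative only (not part of this file): the ONE_REG path dispatched
 * above, seen from the userspace side.  The caller supplies a register id
 * from <asm/kvm.h> and a buffer whose size matches the size encoded in
 * that id; headers as in the earlier sketch.
 */
static int get_one_reg(int vcpu_fd, __u64 id, void *buf)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (__u64)(unsigned long)buf,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}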

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
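
/*
 * Illustrative only (not part of this file): fetching the sequence built
 * above through the KVM_PPC_GET_PVINFO VM ioctl and testing the EV_IDLE
 * flag.  vm_fd is assumed to come from KVM_CREATE_VM; headers as in the
 * earlier sketch.
 */
static int guest_can_use_ev_idle(int vm_fd)
{
	struct kvm_ppc_pvinfo pvinfo;

	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
		return 0;

	return !!(pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE);
}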

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
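
/*
 * Illustrative only (not part of this file): the VM-level KVM_ENABLE_CAP
 * path above, used to turn on in-kernel handling of one hypercall.
 * args[0] is the hcall number, args[1] selects enable (1) or disable (0);
 * headers as in the earlier sketch.
 */
static int enable_hcall(int vm_fd, unsigned long hcall)
{
	struct kvm_enable_cap cap = {
		.cap  = KVM_CAP_PPC_ENABLE_HCALL,
		.args = { hcall, 1 },
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}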

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif
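
/*
 * Illustrative only (not part of this file): reading the result of
 * kvmppc_get_cpu_char() from userspace via the KVM_PPC_GET_CPU_CHAR VM
 * ioctl.  A bit is only meaningful when it is also set in the mask;
 * headers as in the earlier sketch.
 */
static int needs_count_cache_flush(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
		return 0;

	return (cc.behaviour_mask & KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
	       (cc.behaviour & KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE);
}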

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
			IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
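
/*
 * Illustrative only (not part of this file): a sketch of how a platform
 * backend might use the LPID allocator above.  The my_* names are
 * hypothetical; real callers size the pool once from hardware/firmware
 * limits and then take one LPID per guest.
 */
static int my_backend_create_vm(void)
{
	long lpid = kvmppc_alloc_lpid();	/* grab a free LPID for the new guest */

	if (lpid < 0)
		return lpid;

	/* ... program the LPID into the partition/MMU state for this VM ... */

	kvmppc_free_lpid(lpid);			/* normally done at VM teardown */
	return 0;
}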

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);