/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

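/*
 * A vcpu is runnable when it has a pending exception to deliver or an
 * outstanding request flagged against it.
 */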
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

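/*
 * Handle KVM/ePAPR paravirtual hypercalls from the guest: the hypercall
 * number is passed in GPR 11 and up to four parameters in GPRs 3-6; the
 * status code is handed back to the caller and a second return value is
 * placed in GPR 4.
 */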
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

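/*
 * Run the load/store emulator and translate its result into a resume code:
 * stay in the guest on success, or exit to the host (with exit_reason set
 * to KVM_EXIT_MMIO) when userspace has to complete the access.
 */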
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

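/*
 * kvmppc_st() and kvmppc_ld() translate a guest effective address and then
 * store to / load from guest memory.  Accesses that hit the magic (shared)
 * page while the guest is not in problem state are serviced from
 * vcpu->arch.shared instead.
 */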
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

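/*
 * VM creation: pick the HV or PR backend based on the requested VM type
 * (defaulting to HV when both are available), take a reference on the
 * backend module and hand over to its init_vm hook.
 */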
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

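/*
 * Report which capabilities this KVM implementation supports.  Many of the
 * answers depend on whether the VM (or, before a VM exists, the loaded
 * module) provides HV or PR virtualization.
 */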
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

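/*
 * The memslot and memory-region hooks below are thin wrappers around the
 * kvmppc_core_* implementations provided by the active backend.
 */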
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

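/*
 * Helpers for completing MMIO accesses that target VSX registers.  The
 * *_offset() routines map an element index to its position within a VSX
 * register, accounting for host byte order.
 */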
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	u32 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

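/*
 * Equivalent helpers for Altivec (VMX) registers: element offsets are
 * computed from the element size and the guest's byte order via
 * kvmppc_need_byteswap().
 */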
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
		int index, int element_size)
{
	int offset;
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;
	else
		offset = index;

	return offset;
}

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
}

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
}

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
}

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
		int index)
{
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
}


static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	u16 gpr16)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	u8 gpr8)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
			vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;
}
#endif /* CONFIG_ALTIVEC */

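/*
 * Single <-> double precision conversion helpers.  The conversion is done
 * by the FPU itself via inline assembly, so preemption is disabled and
 * kernel FP use is enabled around each conversion; without an FPU they
 * reduce to identity macros.
 */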
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

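/*
 * Finish a previously started MMIO load: byte-swap the data if the host and
 * access endianness differ, apply optional single-to-double and sign
 * extension, then route the value into the GPR, FPR, QPR, VSX or VMX
 * register named by vcpu->arch.io_gpr.
 */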
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);

		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
				KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

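/*
 * Common MMIO load path: describe the access in vcpu->run->mmio, try to
 * satisfy it through the in-kernel MMIO bus, and otherwise return
 * EMULATE_DO_MMIO so the access is completed in userspace.
 */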
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

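/*
 * MMIO store counterpart of the load path above: the value is converted and
 * byte-swapped as needed, copied into run->mmio.data, and offered to the
 * in-kernel MMIO bus before deferring to userspace.
 */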
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

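/*
 * Altivec (VMX) MMIO load/store handling, mirroring the VSX paths above:
 * vector elements are copied one at a time, with the element offset
 * advanced after each successful access.
 */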
09f98496 | 1466 | #ifdef CONFIG_ALTIVEC |
acc9eb93 SG |
1467 | int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1468 | unsigned int rt, unsigned int bytes, int is_default_endian) | |
09f98496 | 1469 | { |
6df3877f | 1470 | enum emulation_result emulated = EMULATE_DONE; |
09f98496 | 1471 | |
acc9eb93 SG |
1472 | if (vcpu->arch.mmio_vsx_copy_nums > 2) |
1473 | return EMULATE_FAIL; | |
1474 | ||
09f98496 | 1475 | while (vcpu->arch.mmio_vmx_copy_nums) { |
acc9eb93 | 1476 | emulated = __kvmppc_handle_load(run, vcpu, rt, bytes, |
09f98496 JRZ |
1477 | is_default_endian, 0); |
1478 | ||
1479 | if (emulated != EMULATE_DONE) | |
1480 | break; | |
1481 | ||
1482 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1483 | vcpu->arch.mmio_vmx_copy_nums--; | |
acc9eb93 | 1484 | vcpu->arch.mmio_vmx_offset++; |
09f98496 JRZ |
1485 | } |
1486 | ||
1487 | return emulated; | |
1488 | } | |
1489 | ||
acc9eb93 | 1490 | int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val) |
09f98496 | 1491 | { |
acc9eb93 SG |
1492 | union kvmppc_one_reg reg; |
1493 | int vmx_offset = 0; | |
1494 | int result = 0; | |
09f98496 | 1495 | |
acc9eb93 SG |
1496 | vmx_offset = |
1497 | kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
09f98496 | 1498 | |
acc9eb93 | 1499 | if (vmx_offset == -1) |
09f98496 JRZ |
1500 | return -1; |
1501 | ||
acc9eb93 SG |
1502 | reg.vval = VCPU_VSX_VR(vcpu, index); |
1503 | *val = reg.vsxval[vmx_offset]; | |
09f98496 | 1504 | |
acc9eb93 SG |
1505 | return result; |
1506 | } | |
09f98496 | 1507 | |
acc9eb93 SG |
1508 | int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val) |
1509 | { | |
1510 | union kvmppc_one_reg reg; | |
1511 | int vmx_offset = 0; | |
1512 | int result = 0; | |
1513 | ||
1514 | vmx_offset = | |
1515 | kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1516 | ||
1517 | if (vmx_offset == -1) | |
1518 | return -1; | |
1519 | ||
1520 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1521 | *val = reg.vsx32val[vmx_offset]; | |
1522 | ||
1523 | return result; | |
1524 | } | |
1525 | ||
1526 | int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val) | |
1527 | { | |
1528 | union kvmppc_one_reg reg; | |
1529 | int vmx_offset = 0; | |
1530 | int result = 0; | |
1531 | ||
1532 | vmx_offset = | |
1533 | kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1534 | ||
1535 | if (vmx_offset == -1) | |
1536 | return -1; | |
1537 | ||
1538 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1539 | *val = reg.vsx16val[vmx_offset]; | |
1540 | ||
1541 | return result; | |
09f98496 JRZ |
1542 | } |
1543 | ||
acc9eb93 SG |
1544 | int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val) |
1545 | { | |
1546 | union kvmppc_one_reg reg; | |
1547 | int vmx_offset = 0; | |
1548 | int result = 0; | |
1549 | ||
1550 | vmx_offset = | |
1551 | kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); | |
1552 | ||
1553 | if (vmx_offset == -1) | |
1554 | return -1; | |
1555 | ||
1556 | reg.vval = VCPU_VSX_VR(vcpu, index); | |
1557 | *val = reg.vsx8val[vmx_offset]; | |
1558 | ||
1559 | return result; | |
09f98496 JRZ |
1560 | } |
1561 | ||
acc9eb93 SG |
1562 | int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu, |
1563 | unsigned int rs, unsigned int bytes, int is_default_endian) | |
09f98496 JRZ |
1564 | { |
1565 | u64 val = 0; | |
acc9eb93 | 1566 | unsigned int index = rs & KVM_MMIO_REG_MASK; |
09f98496 JRZ |
1567 | enum emulation_result emulated = EMULATE_DONE; |
1568 | ||
acc9eb93 SG |
1569 | if (vcpu->arch.mmio_vsx_copy_nums > 2) |
1570 | return EMULATE_FAIL; | |
1571 | ||
09f98496 JRZ |
1572 | vcpu->arch.io_gpr = rs; |
1573 | ||
1574 | while (vcpu->arch.mmio_vmx_copy_nums) { | |
acc9eb93 SG |
1575 | switch (vcpu->arch.mmio_copy_type) { |
1576 | case KVMPPC_VMX_COPY_DWORD: | |
1577 | if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) | |
1578 | return EMULATE_FAIL; | |
1579 | ||
1580 | break; | |
1581 | case KVMPPC_VMX_COPY_WORD: | |
1582 | if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) | |
1583 | return EMULATE_FAIL; | |
1584 | break; | |
1585 | case KVMPPC_VMX_COPY_HWORD: | |
1586 | if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) | |
1587 | return EMULATE_FAIL; | |
1588 | break; | |
1589 | case KVMPPC_VMX_COPY_BYTE: | |
1590 | if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) | |
1591 | return EMULATE_FAIL; | |
1592 | break; | |
1593 | default: | |
09f98496 | 1594 | return EMULATE_FAIL; |
acc9eb93 | 1595 | } |
09f98496 | 1596 | |
acc9eb93 | 1597 | emulated = kvmppc_handle_store(run, vcpu, val, bytes, |
09f98496 JRZ |
1598 | is_default_endian); |
1599 | if (emulated != EMULATE_DONE) | |
1600 | break; | |
1601 | ||
1602 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1603 | vcpu->arch.mmio_vmx_copy_nums--; | |
acc9eb93 | 1604 | vcpu->arch.mmio_vmx_offset++; |
09f98496 JRZ |
1605 | } |
1606 | ||
1607 | return emulated; | |
1608 | } | |
1609 | ||
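kvmppc_handle_vmx_store() walks mmio_vmx_copy_nums elements, extracting one element per iteration and issuing one kvmppc_handle_store() for it, while paddr_accessed and mmio_vmx_offset advance in step. The following standalone sketch only illustrates that decomposition for a full 16-byte vector stored as two dwords; the address and sizes are made-up values, not kernel state.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gpa = 0x3f000000;	/* hypothetical guest physical address */
	unsigned int len = 8;		/* element size for KVMPPC_VMX_COPY_DWORD */
	int copy_nums = 2;		/* two dwords cover the 16-byte register */
	int offset = 0;

	while (copy_nums) {
		printf("MMIO store: gpa=0x%llx len=%u element=%d\n",
		       (unsigned long long)gpa, len, offset);
		gpa += len;		/* paddr_accessed += run->mmio.len */
		copy_nums--;		/* mmio_vmx_copy_nums--            */
		offset++;		/* mmio_vmx_offset++               */
	}
	return 0;
}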
1610 | static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu, | |
1611 | struct kvm_run *run) | |
1612 | { | |
1613 | enum emulation_result emulated = EMULATE_FAIL; | |
1614 | int r; | |
1615 | ||
1616 | vcpu->arch.paddr_accessed += run->mmio.len; | |
1617 | ||
1618 | if (!vcpu->mmio_is_write) { | |
acc9eb93 SG |
1619 | emulated = kvmppc_handle_vmx_load(run, vcpu, |
1620 | vcpu->arch.io_gpr, run->mmio.len, 1); | |
09f98496 | 1621 | } else { |
acc9eb93 SG |
1622 | emulated = kvmppc_handle_vmx_store(run, vcpu, |
1623 | vcpu->arch.io_gpr, run->mmio.len, 1); | |
09f98496 JRZ |
1624 | } |
1625 | ||
1626 | switch (emulated) { | |
1627 | case EMULATE_DO_MMIO: | |
1628 | run->exit_reason = KVM_EXIT_MMIO; | |
1629 | r = RESUME_HOST; | |
1630 | break; | |
1631 | case EMULATE_FAIL: | |
1632 | pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); | |
1633 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1634 | run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | |
1635 | r = RESUME_HOST; | |
1636 | break; | |
1637 | default: | |
1638 | r = RESUME_GUEST; | |
1639 | break; | |
1640 | } | |
1641 | return r; | |
1642 | } | |
1643 | #endif /* CONFIG_ALTIVEC */ | |
1644 | ||
8a41ea53 MC |
1645 | int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) |
1646 | { | |
1647 | int r = 0; | |
1648 | union kvmppc_one_reg val; | |
1649 | int size; | |
1650 | ||
1651 | size = one_reg_size(reg->id); | |
1652 | if (size > sizeof(val)) | |
1653 | return -EINVAL; | |
1654 | ||
1655 | r = kvmppc_get_one_reg(vcpu, reg->id, &val); | |
1656 | if (r == -EINVAL) { | |
1657 | r = 0; | |
1658 | switch (reg->id) { | |
3840edc8 MC |
1659 | #ifdef CONFIG_ALTIVEC |
1660 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: | |
1661 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1662 | r = -ENXIO; | |
1663 | break; | |
1664 | } | |
b4d7f161 | 1665 | val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; |
3840edc8 MC |
1666 | break; |
1667 | case KVM_REG_PPC_VSCR: | |
1668 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1669 | r = -ENXIO; | |
1670 | break; | |
1671 | } | |
b4d7f161 | 1672 | val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); |
3840edc8 MC |
1673 | break; |
1674 | case KVM_REG_PPC_VRSAVE: | |
b4d7f161 | 1675 | val = get_reg_val(reg->id, vcpu->arch.vrsave); |
3840edc8 MC |
1676 | break; |
1677 | #endif /* CONFIG_ALTIVEC */ | |
8a41ea53 MC |
1678 | default: |
1679 | r = -EINVAL; | |
1680 | break; | |
1681 | } | |
1682 | } | |
1683 | ||
1684 | if (r) | |
1685 | return r; | |
1686 | ||
1687 | if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) | |
1688 | r = -EFAULT; | |
1689 | ||
1690 | return r; | |
1691 | } | |
1692 | ||
1693 | int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) | |
1694 | { | |
1695 | int r; | |
1696 | union kvmppc_one_reg val; | |
1697 | int size; | |
1698 | ||
1699 | size = one_reg_size(reg->id); | |
1700 | if (size > sizeof(val)) | |
1701 | return -EINVAL; | |
1702 | ||
1703 | if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) | |
1704 | return -EFAULT; | |
1705 | ||
1706 | r = kvmppc_set_one_reg(vcpu, reg->id, &val); | |
1707 | if (r == -EINVAL) { | |
1708 | r = 0; | |
1709 | switch (reg->id) { | |
3840edc8 MC |
1710 | #ifdef CONFIG_ALTIVEC |
1711 | case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: | |
1712 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1713 | r = -ENXIO; | |
1714 | break; | |
1715 | } | |
b4d7f161 | 1716 | vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; |
3840edc8 MC |
1717 | break; |
1718 | case KVM_REG_PPC_VSCR: | |
1719 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { | |
1720 | r = -ENXIO; | |
1721 | break; | |
1722 | } | |
b4d7f161 | 1723 | vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); |
3840edc8 MC |
1724 | break; |
1725 | case KVM_REG_PPC_VRSAVE: | |
b4d7f161 GK |
1726 | if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { |
1727 | r = -ENXIO; | |
1728 | break; | |
1729 | } | |
1730 | vcpu->arch.vrsave = set_reg_val(reg->id, val); | |
3840edc8 MC |
1731 | break; |
1732 | #endif /* CONFIG_ALTIVEC */ | |
8a41ea53 MC |
1733 | default: |
1734 | r = -EINVAL; | |
1735 | break; | |
1736 | } | |
1737 | } | |
1738 | ||
1739 | return r; | |
1740 | } | |
1741 | ||
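Both one-reg handlers above are reached from userspace through the KVM_GET_ONE_REG and KVM_SET_ONE_REG vCPU ioctls, with reg->addr pointing at a buffer of the register's size. A minimal userspace sketch follows, assuming an already-created vCPU descriptor vcpu_fd and powerpc uapi headers; VRSAVE is a 32-bit register, so a u32 buffer is used. The helper name is illustrative only.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int bump_vrsave(int vcpu_fd)
{
	uint32_t vrsave = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_VRSAVE,
		.addr = (uint64_t)(unsigned long)&vrsave,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	vrsave |= 1;	/* e.g. mark vector register 0 as live */

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}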
bbf45ba5 HB |
1742 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
1743 | { | |
1744 | int r; | |
bbf45ba5 | 1745 | |
accb757d CD |
1746 | vcpu_load(vcpu); |
1747 | ||
bbf45ba5 | 1748 | if (vcpu->mmio_needed) { |
6f63e81b | 1749 | vcpu->mmio_needed = 0; |
bbf45ba5 HB |
1750 | if (!vcpu->mmio_is_write) |
1751 | kvmppc_complete_mmio_load(vcpu, run); | |
6f63e81b BL |
1752 | #ifdef CONFIG_VSX |
1753 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { | |
1754 | vcpu->arch.mmio_vsx_copy_nums--; | |
1755 | vcpu->arch.mmio_vsx_offset++; | |
1756 | } | |
1757 | ||
1758 | if (vcpu->arch.mmio_vsx_copy_nums > 0) { | |
1759 | r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run); | |
1760 | if (r == RESUME_HOST) { | |
1761 | vcpu->mmio_needed = 1; | |
accb757d | 1762 | goto out; |
6f63e81b BL |
1763 | } |
1764 | } | |
09f98496 JRZ |
1765 | #endif |
1766 | #ifdef CONFIG_ALTIVEC | |
acc9eb93 | 1767 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { |
09f98496 | 1768 | vcpu->arch.mmio_vmx_copy_nums--; |
acc9eb93 SG |
1769 | vcpu->arch.mmio_vmx_offset++; |
1770 | } | |
09f98496 JRZ |
1771 | |
1772 | if (vcpu->arch.mmio_vmx_copy_nums > 0) { | |
1773 | r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run); | |
1774 | if (r == RESUME_HOST) { | |
1775 | vcpu->mmio_needed = 1; | |
1ab03c07 | 1776 | goto out; |
09f98496 JRZ |
1777 | } |
1778 | } | |
6f63e81b | 1779 | #endif |
ad0a048b AG |
1780 | } else if (vcpu->arch.osi_needed) { |
1781 | u64 *gprs = run->osi.gprs; | |
1782 | int i; | |
1783 | ||
1784 | for (i = 0; i < 32; i++) | |
1785 | kvmppc_set_gpr(vcpu, i, gprs[i]); | |
1786 | vcpu->arch.osi_needed = 0; | |
de56a948 PM |
1787 | } else if (vcpu->arch.hcall_needed) { |
1788 | int i; | |
1789 | ||
1790 | kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); | |
1791 | for (i = 0; i < 9; ++i) | |
1792 | kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); | |
1793 | vcpu->arch.hcall_needed = 0; | |
1c810636 AG |
1794 | #ifdef CONFIG_BOOKE |
1795 | } else if (vcpu->arch.epr_needed) { | |
1796 | kvmppc_set_epr(vcpu, run->epr.epr); | |
1797 | vcpu->arch.epr_needed = 0; | |
1798 | #endif | |
bbf45ba5 HB |
1799 | } |
1800 | ||
20b7035c | 1801 | kvm_sigset_activate(vcpu); |
6f63e81b | 1802 | |
460df4c1 PB |
1803 | if (run->immediate_exit) |
1804 | r = -EINTR; | |
1805 | else | |
1806 | r = kvmppc_vcpu_run(run, vcpu); | |
bbf45ba5 | 1807 | |
20b7035c | 1808 | kvm_sigset_deactivate(vcpu); |
bbf45ba5 | 1809 | |
c662f773 | 1810 | #ifdef CONFIG_ALTIVEC |
accb757d | 1811 | out: |
c662f773 | 1812 | #endif |
accb757d | 1813 | vcpu_put(vcpu); |
bbf45ba5 HB |
1814 | return r; |
1815 | } | |
1816 | ||
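The MMIO completion logic at the top of kvm_arch_vcpu_ioctl_run() depends on userspace filling in the mmap'd kvm_run area between KVM_RUN calls: on a KVM_EXIT_MMIO read, userspace places the device data in run->mmio.data and re-enters, and the next pass completes the load. A minimal userspace sketch of that loop, assuming vcpu_fd and the mmap'd run structure are already set up (the zero-fill stands in for a real device model):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		if (run->exit_reason == KVM_EXIT_MMIO) {
			if (!run->mmio.is_write)
				/* Device read: supply data here; the next
				 * KVM_RUN completes the load via
				 * kvmppc_complete_mmio_load(). */
				memset(run->mmio.data, 0, run->mmio.len);
			continue;
		}
		return 0;	/* any other exit: hand back to the caller */
	}
}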
1817 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) | |
1818 | { | |
19ccb76a | 1819 | if (irq->irq == KVM_INTERRUPT_UNSET) { |
4fe27d2a | 1820 | kvmppc_core_dequeue_external(vcpu); |
19ccb76a PM |
1821 | return 0; |
1822 | } | |
1823 | ||
1824 | kvmppc_core_queue_external(vcpu, irq); | |
b6d33834 | 1825 | |
dfd4d47e | 1826 | kvm_vcpu_kick(vcpu); |
45c5eb67 | 1827 | |
bbf45ba5 HB |
1828 | return 0; |
1829 | } | |
1830 | ||
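kvm_vcpu_ioctl_interrupt() is driven by the KVM_INTERRUPT vCPU ioctl: KVM_INTERRUPT_UNSET dequeues any pending external interrupt, other values queue one. A minimal userspace sketch, assuming an open vCPU descriptor vcpu_fd; the helper name is illustrative.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_external_irq(int vcpu_fd, int raise)
{
	struct kvm_interrupt irq = {
		.irq = raise ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET,
	};

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}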
71fbfd5f AG |
1831 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
1832 | struct kvm_enable_cap *cap) | |
1833 | { | |
1834 | int r; | |
1835 | ||
1836 | if (cap->flags) | |
1837 | return -EINVAL; | |
1838 | ||
1839 | switch (cap->cap) { | |
ad0a048b AG |
1840 | case KVM_CAP_PPC_OSI: |
1841 | r = 0; | |
1842 | vcpu->arch.osi_enabled = true; | |
1843 | break; | |
930b412a AG |
1844 | case KVM_CAP_PPC_PAPR: |
1845 | r = 0; | |
1846 | vcpu->arch.papr_enabled = true; | |
1847 | break; | |
1c810636 AG |
1848 | case KVM_CAP_PPC_EPR: |
1849 | r = 0; | |
5df554ad SW |
1850 | if (cap->args[0]) |
1851 | vcpu->arch.epr_flags |= KVMPPC_EPR_USER; | |
1852 | else | |
1853 | vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; | |
1c810636 | 1854 | break; |
f61c94bb BB |
1855 | #ifdef CONFIG_BOOKE |
1856 | case KVM_CAP_PPC_BOOKE_WATCHDOG: | |
1857 | r = 0; | |
1858 | vcpu->arch.watchdog_enabled = true; | |
1859 | break; | |
1860 | #endif | |
bf7ca4bd | 1861 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
dc83b8bc SW |
1862 | case KVM_CAP_SW_TLB: { |
1863 | struct kvm_config_tlb cfg; | |
1864 | void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; | |
1865 | ||
1866 | r = -EFAULT; | |
1867 | if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) | |
1868 | break; | |
1869 | ||
1870 | r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); | |
1871 | break; | |
eb1e4f43 SW |
1872 | } |
1873 | #endif | |
1874 | #ifdef CONFIG_KVM_MPIC | |
1875 | case KVM_CAP_IRQ_MPIC: { | |
70abaded | 1876 | struct fd f; |
eb1e4f43 SW |
1877 | struct kvm_device *dev; |
1878 | ||
1879 | r = -EBADF; | |
70abaded AV |
1880 | f = fdget(cap->args[0]); |
1881 | if (!f.file) | |
eb1e4f43 SW |
1882 | break; |
1883 | ||
1884 | r = -EPERM; | |
70abaded | 1885 | dev = kvm_device_from_filp(f.file); |
eb1e4f43 SW |
1886 | if (dev) |
1887 | r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); | |
1888 | ||
70abaded | 1889 | fdput(f); |
eb1e4f43 | 1890 | break; |
dc83b8bc SW |
1891 | } |
1892 | #endif | |
5975a2e0 PM |
1893 | #ifdef CONFIG_KVM_XICS |
1894 | case KVM_CAP_IRQ_XICS: { | |
70abaded | 1895 | struct fd f; |
5975a2e0 PM |
1896 | struct kvm_device *dev; |
1897 | ||
1898 | r = -EBADF; | |
70abaded AV |
1899 | f = fdget(cap->args[0]); |
1900 | if (!f.file) | |
5975a2e0 PM |
1901 | break; |
1902 | ||
1903 | r = -EPERM; | |
70abaded | 1904 | dev = kvm_device_from_filp(f.file); |
5af50993 BH |
1905 | if (dev) { |
1906 | if (xive_enabled()) | |
1907 | r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); | |
1908 | else | |
1909 | r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); | |
1910 | } | |
5975a2e0 | 1911 | |
70abaded | 1912 | fdput(f); |
5975a2e0 PM |
1913 | break; |
1914 | } | |
1915 | #endif /* CONFIG_KVM_XICS */ | |
134764ed AP |
1916 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
1917 | case KVM_CAP_PPC_FWNMI: | |
1918 | r = -EINVAL; | |
1919 | if (!is_kvmppc_hv_enabled(vcpu->kvm)) | |
1920 | break; | |
1921 | r = 0; | |
1922 | vcpu->kvm->arch.fwnmi_enabled = true; | |
1923 | break; | |
1924 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ | |
71fbfd5f AG |
1925 | default: |
1926 | r = -EINVAL; | |
1927 | break; | |
1928 | } | |
1929 | ||
af8f38b3 AG |
1930 | if (!r) |
1931 | r = kvmppc_sanity_check(vcpu); | |
1932 | ||
71fbfd5f AG |
1933 | return r; |
1934 | } | |
1935 | ||
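The per-vCPU capabilities above are switched on from userspace with the KVM_ENABLE_CAP vCPU ioctl. A minimal sketch enabling the PAPR and OSI interfaces, both of which take no arguments, assuming an open vCPU descriptor vcpu_fd:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_papr_and_osi(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_PAPR;
	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_OSI;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}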
34a75b0f PM |
1936 | bool kvm_arch_intc_initialized(struct kvm *kvm) |
1937 | { | |
1938 | #ifdef CONFIG_KVM_MPIC | |
1939 | if (kvm->arch.mpic) | |
1940 | return true; | |
1941 | #endif | |
1942 | #ifdef CONFIG_KVM_XICS | |
5af50993 | 1943 | if (kvm->arch.xics || kvm->arch.xive) |
34a75b0f PM |
1944 | return true; |
1945 | #endif | |
1946 | return false; | |
1947 | } | |
1948 | ||
bbf45ba5 HB |
1949 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
1950 | struct kvm_mp_state *mp_state) | |
1951 | { | |
1952 | return -EINVAL; | |
1953 | } | |
1954 | ||
1955 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | |
1956 | struct kvm_mp_state *mp_state) | |
1957 | { | |
1958 | return -EINVAL; | |
1959 | } | |
1960 | ||
5cb0944c PB |
1961 | long kvm_arch_vcpu_async_ioctl(struct file *filp, |
1962 | unsigned int ioctl, unsigned long arg) | |
bbf45ba5 HB |
1963 | { |
1964 | struct kvm_vcpu *vcpu = filp->private_data; | |
1965 | void __user *argp = (void __user *)arg; | |
bbf45ba5 | 1966 | |
9b062471 | 1967 | if (ioctl == KVM_INTERRUPT) { |
bbf45ba5 | 1968 | struct kvm_interrupt irq; |
bbf45ba5 | 1969 | if (copy_from_user(&irq, argp, sizeof(irq))) |
9b062471 CD |
1970 | return -EFAULT; |
1971 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); | |
bbf45ba5 | 1972 | } |
5cb0944c PB |
1973 | return -ENOIOCTLCMD; |
1974 | } | |
1975 | ||
1976 | long kvm_arch_vcpu_ioctl(struct file *filp, | |
1977 | unsigned int ioctl, unsigned long arg) | |
1978 | { | |
1979 | struct kvm_vcpu *vcpu = filp->private_data; | |
1980 | void __user *argp = (void __user *)arg; | |
1981 | long r; | |
19483d14 | 1982 | |
9b062471 | 1983 | switch (ioctl) { |
71fbfd5f AG |
1984 | case KVM_ENABLE_CAP: |
1985 | { | |
1986 | struct kvm_enable_cap cap; | |
1987 | r = -EFAULT; | |
b3cebfe8 | 1988 | vcpu_load(vcpu); |
71fbfd5f AG |
1989 | if (copy_from_user(&cap, argp, sizeof(cap))) |
1990 | goto out; | |
1991 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | |
b3cebfe8 | 1992 | vcpu_put(vcpu); |
71fbfd5f AG |
1993 | break; |
1994 | } | |
dc83b8bc | 1995 | |
e24ed81f AG |
1996 | case KVM_SET_ONE_REG: |
1997 | case KVM_GET_ONE_REG: | |
1998 | { | |
1999 | struct kvm_one_reg reg; | |
2000 | r = -EFAULT; | |
2001 | if (copy_from_user(®, argp, sizeof(reg))) | |
2002 | goto out; | |
2003 | if (ioctl == KVM_SET_ONE_REG) | |
2004 | r = kvm_vcpu_ioctl_set_one_reg(vcpu, ®); | |
2005 | else | |
2006 | r = kvm_vcpu_ioctl_get_one_reg(vcpu, ®); | |
2007 | break; | |
2008 | } | |
2009 | ||
bf7ca4bd | 2010 | #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC) |
dc83b8bc SW |
2011 | case KVM_DIRTY_TLB: { |
2012 | struct kvm_dirty_tlb dirty; | |
2013 | r = -EFAULT; | |
b3cebfe8 | 2014 | vcpu_load(vcpu); |
dc83b8bc SW |
2015 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
2016 | goto out; | |
2017 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); | |
b3cebfe8 | 2018 | vcpu_put(vcpu); |
dc83b8bc SW |
2019 | break; |
2020 | } | |
2021 | #endif | |
bbf45ba5 HB |
2022 | default: |
2023 | r = -EINVAL; | |
2024 | } | |
2025 | ||
2026 | out: | |
2027 | return r; | |
2028 | } | |
2029 | ||
1499fa80 | 2030 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
5b1c1493 CO |
2031 | { |
2032 | return VM_FAULT_SIGBUS; | |
2033 | } | |
2034 | ||
15711e9c AG |
2035 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) |
2036 | { | |
784bafac SY |
2037 | u32 inst_nop = 0x60000000; |
2038 | #ifdef CONFIG_KVM_BOOKE_HV | |
2039 | u32 inst_sc1 = 0x44000022; | |
2743103f AG |
2040 | pvinfo->hcall[0] = cpu_to_be32(inst_sc1); |
2041 | pvinfo->hcall[1] = cpu_to_be32(inst_nop); | |
2042 | pvinfo->hcall[2] = cpu_to_be32(inst_nop); | |
2043 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); | |
784bafac | 2044 | #else |
15711e9c AG |
2045 | u32 inst_lis = 0x3c000000; |
2046 | u32 inst_ori = 0x60000000; | |
15711e9c AG |
2047 | u32 inst_sc = 0x44000002; |
2048 | u32 inst_imm_mask = 0xffff; | |
2049 | ||
2050 | /* | |
2051 | * The hypercall to get into KVM from within guest context is as | |
2052 | * follows: | |
2053 | * | |
2054 | * lis r0, KVM_SC_MAGIC_R0@h | |
2055 | * ori r0, r0, KVM_SC_MAGIC_R0@l | |
2056 | * sc | |
2057 | * nop | |
2058 | */ | |
2743103f AG |
2059 | pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); |
2060 | pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); | |
2061 | pvinfo->hcall[2] = cpu_to_be32(inst_sc); | |
2062 | pvinfo->hcall[3] = cpu_to_be32(inst_nop); | |
784bafac | 2063 | #endif |
15711e9c | 2064 | |
9202e076 LYB |
2065 | pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; |
2066 | ||
15711e9c AG |
2067 | return 0; |
2068 | } | |
2069 | ||
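The pvinfo built above reaches userspace through the KVM_PPC_GET_PVINFO VM ioctl; guests are expected to patch the returned four-instruction sequence into their hypercall stub. A minimal userspace sketch, assuming an open VM descriptor vm_fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int show_pvinfo(int vm_fd)
{
	struct kvm_ppc_pvinfo pvinfo;
	int i;

	if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
		return -1;

	for (i = 0; i < 4; i++)
		printf("hcall[%d] = 0x%08x\n", i, pvinfo.hcall[i]);

	if (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)
		printf("ev_idle hypercall is available\n");
	return 0;
}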
5efdb4be AG |
2070 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, |
2071 | bool line_status) | |
2072 | { | |
2073 | if (!irqchip_in_kernel(kvm)) | |
2074 | return -ENXIO; | |
2075 | ||
2076 | irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, | |
2077 | irq_event->irq, irq_event->level, | |
2078 | line_status); | |
2079 | return 0; | |
2080 | } | |
2081 | ||
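kvm_vm_ioctl_irq_line() backs the KVM_IRQ_LINE VM ioctl, which only works once an in-kernel interrupt controller (MPIC, XICS or XIVE) has been created. A minimal userspace sketch, assuming an open VM descriptor vm_fd; the interrupt numbering is controller specific.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_irq_line(int vm_fd, unsigned int irq, int level)
{
	struct kvm_irq_level irq_level = {
		.irq   = irq,
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}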
699a0ea0 PM |
2082 | |
2083 | static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, | |
2084 | struct kvm_enable_cap *cap) | |
2085 | { | |
2086 | int r; | |
2087 | ||
2088 | if (cap->flags) | |
2089 | return -EINVAL; | |
2090 | ||
2091 | switch (cap->cap) { | |
2092 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | |
2093 | case KVM_CAP_PPC_ENABLE_HCALL: { | |
2094 | unsigned long hcall = cap->args[0]; | |
2095 | ||
2096 | r = -EINVAL; | |
2097 | if (hcall > MAX_HCALL_OPCODE || (hcall & 3) || | |
2098 | cap->args[1] > 1) | |
2099 | break; | |
ae2113a4 PM |
2100 | if (!kvmppc_book3s_hcall_implemented(kvm, hcall)) |
2101 | break; | |
699a0ea0 PM |
2102 | if (cap->args[1]) |
2103 | set_bit(hcall / 4, kvm->arch.enabled_hcalls); | |
2104 | else | |
2105 | clear_bit(hcall / 4, kvm->arch.enabled_hcalls); | |
2106 | r = 0; | |
2107 | break; | |
2108 | } | |
3c313524 PM |
2109 | case KVM_CAP_PPC_SMT: { |
2110 | unsigned long mode = cap->args[0]; | |
2111 | unsigned long flags = cap->args[1]; | |
2112 | ||
2113 | r = -EINVAL; | |
2114 | if (kvm->arch.kvm_ops->set_smt_mode) | |
2115 | r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); | |
2116 | break; | |
2117 | } | |
699a0ea0 PM |
2118 | #endif |
2119 | default: | |
2120 | r = -EINVAL; | |
2121 | break; | |
2122 | } | |
2123 | ||
2124 | return r; | |
2125 | } | |
2126 | ||
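The VM-level KVM_CAP_PPC_ENABLE_HCALL handler above takes the hypercall opcode in args[0] and an enable/disable flag in args[1]. A minimal userspace sketch, assuming an open VM descriptor vm_fd; hcall_opcode is a placeholder for an opcode the kernel actually implements.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_hcall(int vm_fd, unsigned long hcall_opcode)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_ENABLE_HCALL;
	cap.args[0] = hcall_opcode;	/* which hypercall */
	cap.args[1] = 1;		/* 1 = enable for the guest, 0 = disable */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}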
3214d01f PM |
2127 | #ifdef CONFIG_PPC_BOOK3S_64 |
2128 | /* | |
2129 | * These functions check whether the underlying hardware is safe | |
2130 | * against attacks based on observing the effects of speculatively | |
2131 | * executed instructions, and whether it supplies instructions for | |
2132 | * use in workarounds. The information comes from firmware, either | |
2133 | * via the device tree on powernv platforms or from an hcall on | |
2134 | * pseries platforms. | |
2135 | */ | |
2136 | #ifdef CONFIG_PPC_PSERIES | |
2137 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2138 | { | |
2139 | struct h_cpu_char_result c; | |
2140 | unsigned long rc; | |
2141 | ||
2142 | if (!machine_is(pseries)) | |
2143 | return -ENOTTY; | |
2144 | ||
2145 | rc = plpar_get_cpu_characteristics(&c); | |
2146 | if (rc == H_SUCCESS) { | |
2147 | cp->character = c.character; | |
2148 | cp->behaviour = c.behaviour; | |
2149 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | | |
2150 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | | |
2151 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | | |
2152 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | | |
2153 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | | |
2154 | KVM_PPC_CPU_CHAR_BR_HINT_HONOURED | | |
2155 | KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF | | |
2156 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; | |
2157 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | | |
2158 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | | |
2159 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
2160 | } | |
2161 | return 0; | |
2162 | } | |
2163 | #else | |
2164 | static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2165 | { | |
2166 | return -ENOTTY; | |
2167 | } | |
2168 | #endif | |
2169 | ||
2170 | static inline bool have_fw_feat(struct device_node *fw_features, | |
2171 | const char *state, const char *name) | |
2172 | { | |
2173 | struct device_node *np; | |
2174 | bool r = false; | |
2175 | ||
2176 | np = of_get_child_by_name(fw_features, name); | |
2177 | if (np) { | |
2178 | r = of_property_read_bool(np, state); | |
2179 | of_node_put(np); | |
2180 | } | |
2181 | return r; | |
2182 | } | |
2183 | ||
2184 | static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp) | |
2185 | { | |
2186 | struct device_node *np, *fw_features; | |
2187 | int r; | |
2188 | ||
2189 | memset(cp, 0, sizeof(*cp)); | |
2190 | r = pseries_get_cpu_char(cp); | |
2191 | if (r != -ENOTTY) | |
2192 | return r; | |
2193 | ||
2194 | np = of_find_node_by_name(NULL, "ibm,opal"); | |
2195 | if (np) { | |
2196 | fw_features = of_get_child_by_name(np, "fw-features"); | |
2197 | of_node_put(np); | |
2198 | if (!fw_features) | |
2199 | return 0; | |
2200 | if (have_fw_feat(fw_features, "enabled", | |
2201 | "inst-spec-barrier-ori31,31,0")) | |
2202 | cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; | |
2203 | if (have_fw_feat(fw_features, "enabled", | |
2204 | "fw-bcctrl-serialized")) | |
2205 | cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; | |
2206 | if (have_fw_feat(fw_features, "enabled", | |
2207 | "inst-l1d-flush-ori30,30,0")) | |
2208 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; | |
2209 | if (have_fw_feat(fw_features, "enabled", | |
2210 | "inst-l1d-flush-trig2")) | |
2211 | cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; | |
2212 | if (have_fw_feat(fw_features, "enabled", | |
2213 | "fw-l1d-thread-split")) | |
2214 | cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; | |
2215 | if (have_fw_feat(fw_features, "enabled", | |
2216 | "fw-count-cache-disabled")) | |
2217 | cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; | |
2218 | cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | | |
2219 | KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED | | |
2220 | KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 | | |
2221 | KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 | | |
2222 | KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV | | |
2223 | KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; | |
2224 | ||
2225 | if (have_fw_feat(fw_features, "enabled", | |
2226 | "speculation-policy-favor-security")) | |
2227 | cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; | |
2228 | if (!have_fw_feat(fw_features, "disabled", | |
2229 | "needs-l1d-flush-msr-pr-0-to-1")) | |
2230 | cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; | |
2231 | if (!have_fw_feat(fw_features, "disabled", | |
2232 | "needs-spec-barrier-for-bound-checks")) | |
2233 | cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
2234 | cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | | |
2235 | KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR | | |
2236 | KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
2237 | ||
2238 | of_node_put(fw_features); | |
2239 | } | |
2240 | ||
2241 | return 0; | |
2242 | } | |
2243 | #endif | |
2244 | ||
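Userspace retrieves the characteristics assembled by kvmppc_get_cpu_char() with the KVM_PPC_GET_CPU_CHAR VM ioctl and should only trust bits that are also set in the corresponding mask. A minimal sketch, assuming an open VM descriptor vm_fd on a Book3S 64 host:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int show_cpu_char(int vm_fd)
{
	struct kvm_ppc_cpu_char cc;

	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) < 0)
		return -1;

	if (cc.character & cc.character_mask & KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2)
		printf("L1D flush via TRIG2 is available\n");
	if (cc.behaviour & cc.behaviour_mask & KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR)
		printf("guest kernels should flush L1D when switching to user mode\n");
	return 0;
}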
bbf45ba5 HB |
2245 | long kvm_arch_vm_ioctl(struct file *filp, |
2246 | unsigned int ioctl, unsigned long arg) | |
2247 | { | |
5df554ad | 2248 | struct kvm *kvm __maybe_unused = filp->private_data; |
15711e9c | 2249 | void __user *argp = (void __user *)arg; |
bbf45ba5 HB |
2250 | long r; |
2251 | ||
2252 | switch (ioctl) { | |
15711e9c AG |
2253 | case KVM_PPC_GET_PVINFO: { |
2254 | struct kvm_ppc_pvinfo pvinfo; | |
d8cdddcd | 2255 | memset(&pvinfo, 0, sizeof(pvinfo)); |
15711e9c AG |
2256 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); |
2257 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { | |
2258 | r = -EFAULT; | |
2259 | goto out; | |
2260 | } | |
2261 | ||
2262 | break; | |
2263 | } | |
699a0ea0 PM |
2264 | case KVM_ENABLE_CAP: |
2265 | { | |
2266 | struct kvm_enable_cap cap; | |
2267 | r = -EFAULT; | |
2268 | if (copy_from_user(&cap, argp, sizeof(cap))) | |
2269 | goto out; | |
2270 | r = kvm_vm_ioctl_enable_cap(kvm, &cap); | |
2271 | break; | |
2272 | } | |
76d837a4 | 2273 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
58ded420 AK |
2274 | case KVM_CREATE_SPAPR_TCE_64: { |
2275 | struct kvm_create_spapr_tce_64 create_tce_64; | |
2276 | ||
2277 | r = -EFAULT; | |
2278 | if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64))) | |
2279 | goto out; | |
2280 | if (create_tce_64.flags) { | |
2281 | r = -EINVAL; | |
2282 | goto out; | |
2283 | } | |
2284 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); | |
2285 | goto out; | |
2286 | } | |
54738c09 DG |
2287 | case KVM_CREATE_SPAPR_TCE: { |
2288 | struct kvm_create_spapr_tce create_tce; | |
58ded420 | 2289 | struct kvm_create_spapr_tce_64 create_tce_64; |
54738c09 DG |
2290 | |
2291 | r = -EFAULT; | |
2292 | if (copy_from_user(&create_tce, argp, sizeof(create_tce))) | |
2293 | goto out; | |
58ded420 AK |
2294 | |
2295 | create_tce_64.liobn = create_tce.liobn; | |
2296 | create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K; | |
2297 | create_tce_64.offset = 0; | |
2298 | create_tce_64.size = create_tce.window_size >> | |
2299 | IOMMU_PAGE_SHIFT_4K; | |
2300 | create_tce_64.flags = 0; | |
2301 | r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64); | |
54738c09 DG |
2302 | goto out; |
2303 | } | |
76d837a4 PM |
2304 | #endif |
2305 | #ifdef CONFIG_PPC_BOOK3S_64 | |
5b74716e | 2306 | case KVM_PPC_GET_SMMU_INFO: { |
5b74716e | 2307 | struct kvm_ppc_smmu_info info; |
cbbc58d4 | 2308 | struct kvm *kvm = filp->private_data; |
5b74716e BH |
2309 | |
2310 | memset(&info, 0, sizeof(info)); | |
cbbc58d4 | 2311 | r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); |
5b74716e BH |
2312 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) |
2313 | r = -EFAULT; | |
2314 | break; | |
2315 | } | |
8e591cb7 ME |
2316 | case KVM_PPC_RTAS_DEFINE_TOKEN: { |
2317 | struct kvm *kvm = filp->private_data; | |
2318 | ||
2319 | r = kvm_vm_ioctl_rtas_define_token(kvm, argp); | |
2320 | break; | |
2321 | } | |
c9270132 PM |
2322 | case KVM_PPC_CONFIGURE_V3_MMU: { |
2323 | struct kvm *kvm = filp->private_data; | |
2324 | struct kvm_ppc_mmuv3_cfg cfg; | |
2325 | ||
2326 | r = -EINVAL; | |
2327 | if (!kvm->arch.kvm_ops->configure_mmu) | |
2328 | goto out; | |
2329 | r = -EFAULT; | |
2330 | if (copy_from_user(&cfg, argp, sizeof(cfg))) | |
2331 | goto out; | |
2332 | r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); | |
2333 | break; | |
2334 | } | |
2335 | case KVM_PPC_GET_RMMU_INFO: { | |
2336 | struct kvm *kvm = filp->private_data; | |
2337 | struct kvm_ppc_rmmu_info info; | |
2338 | ||
2339 | r = -EINVAL; | |
2340 | if (!kvm->arch.kvm_ops->get_rmmu_info) | |
2341 | goto out; | |
2342 | r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); | |
2343 | if (r >= 0 && copy_to_user(argp, &info, sizeof(info))) | |
2344 | r = -EFAULT; | |
2345 | break; | |
2346 | } | |
3214d01f PM |
2347 | case KVM_PPC_GET_CPU_CHAR: { |
2348 | struct kvm_ppc_cpu_char cpuchar; | |
2349 | ||
2350 | r = kvmppc_get_cpu_char(&cpuchar); | |
2351 | if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar))) | |
2352 | r = -EFAULT; | |
2353 | break; | |
2354 | } | |
cbbc58d4 AK |
2355 | default: { |
2356 | struct kvm *kvm = filp->private_data; | |
2357 | r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); | |
2358 | } | |
3a167bea | 2359 | #else /* CONFIG_PPC_BOOK3S_64 */ |
bbf45ba5 | 2360 | default: |
367e1319 | 2361 | r = -ENOTTY; |
3a167bea | 2362 | #endif |
bbf45ba5 | 2363 | } |
15711e9c | 2364 | out: |
bbf45ba5 HB |
2365 | return r; |
2366 | } | |
2367 | ||
043cc4d7 SW |
2368 | static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)]; |
2369 | static unsigned long nr_lpids; | |
2370 | ||
2371 | long kvmppc_alloc_lpid(void) | |
2372 | { | |
2373 | long lpid; | |
2374 | ||
2375 | do { | |
2376 | lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS); | |
2377 | if (lpid >= nr_lpids) { | |
2378 | pr_err("%s: No LPIDs free\n", __func__); | |
2379 | return -ENOMEM; | |
2380 | } | |
2381 | } while (test_and_set_bit(lpid, lpid_inuse)); | |
2382 | ||
2383 | return lpid; | |
2384 | } | |
2ba9f0d8 | 2385 | EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid); |
043cc4d7 SW |
2386 | |
2387 | void kvmppc_claim_lpid(long lpid) | |
2388 | { | |
2389 | set_bit(lpid, lpid_inuse); | |
2390 | } | |
2ba9f0d8 | 2391 | EXPORT_SYMBOL_GPL(kvmppc_claim_lpid); |
043cc4d7 SW |
2392 | |
2393 | void kvmppc_free_lpid(long lpid) | |
2394 | { | |
2395 | clear_bit(lpid, lpid_inuse); | |
2396 | } | |
2ba9f0d8 | 2397 | EXPORT_SYMBOL_GPL(kvmppc_free_lpid); |
043cc4d7 SW |
2398 | |
2399 | void kvmppc_init_lpid(unsigned long nr_lpids_param) | |
2400 | { | |
2401 | nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param); | |
2402 | memset(lpid_inuse, 0, sizeof(lpid_inuse)); | |
2403 | } | |
2ba9f0d8 | 2404 | EXPORT_SYMBOL_GPL(kvmppc_init_lpid); |
043cc4d7 | 2405 | |
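The LPID helpers above implement a simple bitmap allocator: find_first_zero_bit() locates a free partition ID and test_and_set_bit() claims it atomically, so concurrent callers cannot grab the same LPID. Below is a standalone, single-threaded sketch of the same pattern; it is not kernel code, and unlike the kernel version it has no atomic bit operations for SMP safety.

#include <stdio.h>

#define NR_IDS 64

static unsigned long long inuse;	/* bit n set => ID n allocated */

static long alloc_id(void)
{
	long id;

	for (id = 0; id < NR_IDS; id++) {
		if (!(inuse & (1ULL << id))) {
			inuse |= 1ULL << id;	/* claim, like test_and_set_bit() */
			return id;
		}
	}
	return -1;				/* no IDs free */
}

static void free_id(long id)
{
	inuse &= ~(1ULL << id);			/* like clear_bit() */
}

int main(void)
{
	long a = alloc_id(), b = alloc_id();

	printf("allocated %ld and %ld\n", a, b);
	free_id(a);
	printf("reallocated %ld\n", alloc_id());
	return 0;
}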
bbf45ba5 HB |
2406 | int kvm_arch_init(void *opaque) |
2407 | { | |
2408 | return 0; | |
2409 | } | |
2410 | ||
478d6686 | 2411 | EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr); |