/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *     Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define __hard_irq_disable local_irq_disable
#define __hard_irq_enable local_irq_enable
#endif

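/*
 * On 64-bit hosts the shadow SLB and shadow vcpu state live in the
 * per-cpu PACA, so they are copied in when a vcpu is scheduled onto a
 * core and copied back out when it is descheduled.
 */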
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

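/*
 * The shadow MSR is what the guest actually runs with: guest-controlled
 * bits are taken from the guest's view of the MSR, while translation,
 * problem state and interrupt enables are forced on so the guest stays
 * confined to user mode on the host.
 */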
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External provider bits the guest currently owns */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

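	/*
	 * The guest set MSR_POW (power management): treat it as a halt
	 * and block the vcpu until something is pending for it.
	 */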
	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, and we need to flush it. Typically, the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: we
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set, so let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage)) {
		kvm_release_page_clean(hpage);
		return;
	}

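	/*
	 * Compute the first word to scan: take the offset into the host
	 * page, round down to the 4k sub-page boundary and convert from
	 * bytes to a u32 index.
	 */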
	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into a reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

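	/*
	 * Tag the virtual page number with a marker for the translation
	 * regime it belongs to, so real-mode and split-real (only one of
	 * MSR_IR/MSR_DR set) accesses get shadow mappings distinct from
	 * fully relocated ones.
	 */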
	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

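/*
 * With CONFIG_VSX the FP registers are interleaved into the wider
 * FP/VSR array in thread_struct (one value per two doublewords), so
 * FPR i lives at doubleword index 2*i.
 */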
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

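		/*
		 * The load missed in the guest MMU, so reflect an
		 * instruction storage interrupt into the guest: set the
		 * page-fault bit (bit 33, IBM numbering) and clear the
		 * other ISI status bits (34-36 and 42-47) in SRR1.
		 */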
		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

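/*
 * Extension state is switched lazily: the guest only gets FPU, Altivec
 * or VSX state loaded into the hardware registers once it actually uses
 * the facility, at which point the host copies are saved away.
 */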
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=0, so enable it to be a nice citizen */
	__hard_irq_enable();

	trace_kvm_book3s_exit(exit_nr, vcpu);
	preempt_enable();
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them,
		 * so treat the respective fault as a segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack, we use the NX bit to
			 * flush & patch the page, so we can't use the NX bit
			 * inside the guest. Let's cross our fingers that no
			 * guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them,
		 * so treat the respective fault as a segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

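		/*
		 * A program interrupt taken in problem state normally
		 * belongs to the guest, unless the faulting instruction
		 * is one of our patched dcbz opcodes, which we have to
		 * emulate ourselves.
		 */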
		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
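	/*
	 * 0x44000022 is the "sc 1" encoding, i.e. a system call with
	 * LEV=1, which PAPR guests use for hypercalls.
	 */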
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
			   (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
			   (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	preempt_disable();
	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the
		 * guest and if we really did time things so badly, then we
		 * just exit again due to a host external interrupt.
		 */
		__hard_irq_disable();
		if (signal_pending(current)) {
			__hard_irq_enable();
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_prepare_to_enter(vcpu);
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &to_book3s(vcpu)->hior, sizeof(u64));
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_from_user(&to_book3s(vcpu)->hior,
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		if (!r)
			to_book3s(vcpu)->hior_explicit = true;
		break;
	default:
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	preempt_disable();

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	__hard_irq_disable();

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		__hard_irq_enable();
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

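	/*
	 * The guest may lazily pull FPU/Altivec/VSX state into the real
	 * registers while it runs, so the host's copies are stashed on
	 * the stack here and restored after the guest run.
	 */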
	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvm_guest_enter();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvm_guest_exit();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	preempt_enable();
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	/* No flags */
	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);