/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu, svcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr = vcpu->arch.lr;
        svcpu->pc = vcpu->arch.pc;
        svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /*
         * vcpu_put would just call us again because in_use hasn't
         * been updated yet.
         */
        preempt_disable();

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr = svcpu->lr;
        vcpu->arch.pc = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst = svcpu->last_inst;
        svcpu->in_use = false;

out:
        preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

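/*
 * A note on the shadow MSR (summarizing the derivation below): since PR
 * KVM runs the guest in problem state, the MSR the hardware sees while
 * the guest runs is not the guest's MSR. Only a few guest bits pass
 * through directly (FE0/FE1/SF/SE/BE), the facility bits (FP/VEC/VSX)
 * pass through only for facilities the guest currently owns, and the
 * bits the host requires (ME/RI/IR/DR/PR/EE) are forced on. So a guest
 * in "kernel mode" (MSR_PR clear) still executes with MSR_PR set in the
 * shadow MSR; privilege is emulated rather than granted.
 */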
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers (FPU/Altivec/VSX) the guest owns */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
            (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, and we need to flush it. Typically a 32-bit
         * magic page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * ie, we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell, and force-disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:  /* lonestar 2.0 */
        case 0x00088202:  /* lonestar 2.2 */
        case 0x70000100:  /* gekko 1.0 */
        case 0x00080100:  /* gekko 2.0 */
        case 0x00083203:  /* gekko 2.3a */
        case 0x00083213:  /* gekko 2.3b */
        case 0x00083204:  /* gekko 2.4 */
        case 0x00083214:  /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:  /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
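/*
 * Worked example of the patching below (the bit arithmetic follows from
 * the masks used here and in the program interrupt handler): dcbz rA,rB
 * only varies in its register fields, so "insn & 0xff0007ff" equals
 * INS_DCBZ for any dcbz regardless of operands. Clearing bit 0x8
 * (insn &= 0xfffffff7) produces a reserved encoding that raises a
 * program interrupt; the exit handler recognizes the patched form by
 * comparing against (INS_DCBZ & 0xfffffff7) and emulates a 32-byte dcbz
 * instead of forwarding the fault to the guest.
 */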
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}

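/*
 * Note on the magic page check below: with MSR_SF clear (32-bit guest
 * mode) the magic page address is truncated to 32 bits before the
 * comparison, matching how the guest itself interprets the address.
 */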
static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

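/*
 * Shadow-MMU fault resolution, in outline (a summary of the function
 * below): translate the effective address through the guest MMU if
 * relocation is on, or build an identity mapping otherwise; tag the
 * virtual page number with a VSID_REAL* marker so real-mode and
 * translated mappings never alias in the shadow page tables; then
 * either reflect the fault into the guest (-ENOENT: no guest PTE,
 * -EPERM: protection fault, -EINVAL: no guest SLB entry), install a
 * host mapping, or hand the access to userspace as MMIO.
 */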
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
            (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

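/*
 * fp_state.fpr[] holds TS_FPRWIDTH doublewords per register: the FP
 * value and, on VSX machines, the second half of the corresponding VSX
 * register. Hence FP register i lives at index i * TS_FPRWIDTH, and the
 * code below uses get_fpr_index(i) + 1 for the VSX companion word.
 */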
static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fp_state.fpscr;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vr_state.vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

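/*
 * Lazy facility switching (a summary of the scheme implemented by
 * kvmppc_handle_ext() and kvmppc_giveup_ext()): guest_owned_ext tracks
 * which of FP/VEC/VSX currently hold guest state in this host thread.
 * A facility-unavailable exit loads the guest registers into the thread
 * and sets the corresponding bit; giving a facility up flushes the
 * registers back into the vcpu and clears it. The shadow MSR is
 * recalculated each time so the guest only has direct access to
 * facilities it owns.
 */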
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = &t->fp_state.fpr[0][0];
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fp_state.fpscr = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vr_state.vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
#endif
        current->thread.regs->msr |= lost_ext;
}

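/*
 * Exit handling in outline (a summary of the function below): we arrive
 * from the guest with MSR.EE already set. Each exit either fixes
 * something up on the host side (shadow page faults, segment mappings,
 * lazy facility loads) and resumes the guest, reflects the interrupt
 * into the guest via kvmppc_book3s_queue_irqprio(), or bounces out to
 * userspace (MMIO, PAPR hypercalls, OSI). Before re-entering the guest,
 * interrupts are disabled and kvmppc_prepare_to_enter() runs so pending
 * signals and host interrupts get handled first.
 */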
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush
                         * and patch the page, so we can't use the NX bit inside
                         * the guest. Let's cross our fingers that no guest that
                         * needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
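        /*
         * System call exits: 0x44000022 is the encoding of "sc 1", the
         * hypercall form of the system call instruction, so an sc 1 from
         * a PAPR guest's kernel mode is treated as a hypercall (handled
         * in kernel where possible, otherwise passed to userspace). The
         * OSI magic register values select Mac-on-Linux hypercalls, and
         * KVM_SC_MAGIC_R0 selects KVM's own paravirtual calls; anything
         * else is an ordinary guest syscall and is reflected back into
         * the guest.
         */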
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }
                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

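/*
 * The sregs layout mirrors the emulated MMU: with BOOK3S_HFLAG_SLB set
 * (64-bit guests) the 64 SLB entries are exposed; otherwise the sixteen
 * 32-bit segment registers and the eight IBAT/DBAT pairs are.
 */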
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                              sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on a sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        struct thread_fp_state fp;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        struct thread_vr_state vr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        fp = current->thread.fp_state;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                vr = current->thread.vr_state;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        current->thread.fp_state = fp;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                current->thread.vr_state = vr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
                                         struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
                                         struct kvm_memory_slot *memslot)
{
        return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        struct kvm_userspace_memory_region *mem)
{
        return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
                                        struct kvm_memory_slot *dont)
{
        return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
                                         unsigned long npages)
{
        return 0;
}

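/*
 * MMU geometry as reported to userspace below: a 64-entry SLB and a 4k
 * base page size are always present; 64k pages plus 1T segments are
 * advertised only when the first vcpu's emulated CPU has
 * BOOK3S_HFLAG_MULTI_PGSIZE (POWER6 and later), and a 16M large page
 * segment completes the list.
 */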
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test. Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        /* We should not get called */
        BUG();
}
#endif /* CONFIG_PPC64 */

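/*
 * "Relocation on exceptions" management: while any PR VM exists on a
 * host with FW_FEATURE_SET_MODE, relocation-on-exceptions is disabled
 * (PR KVM depends on taking exceptions at the classical vectors), and
 * the refcount below ensures it is re-enabled only when the last such
 * VM goes away.
 */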
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_core_check_processor_compat_pr(void)
{
        /* we are always compatible */
        return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
        .get_one_reg = kvmppc_get_one_reg_pr,
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load = kvmppc_core_vcpu_load_pr,
        .vcpu_put = kvmppc_core_vcpu_put_pr,
        .set_msr = kvmppc_set_msr_pr,
        .vcpu_run = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,
        .vcpu_free = kvmppc_core_vcpu_free_pr,
        .check_requests = kvmppc_core_check_requests_pr,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
        .flush_memslot = kvmppc_core_flush_memslot_pr,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
        .unmap_hva = kvm_unmap_hva_pr,
        .unmap_hva_range = kvm_unmap_hva_range_pr,
        .age_hva = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
        .mmu_destroy = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .create_memslot = kvmppc_core_create_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
        .emulate_op = kvmppc_core_emulate_op_pr,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
        .fast_vcpu_kick = kvm_vcpu_kick,
        .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};

int kvmppc_book3s_init_pr(void)
{
        int r;

        r = kvmppc_core_check_processor_compat_pr();
        if (r < 0)
                return r;

        kvm_ops_pr.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_pr;

        r = kvmppc_mmu_hpte_sysinit();
        return r;
}

void kvmppc_book3s_exit_pr(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
#endif