Commit | Line | Data |
---|---|---|
f05ed4d5 PM |
1 | /* |
2 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. | |
3 | * | |
4 | * Authors: | |
5 | * Alexander Graf <agraf@suse.de> | |
6 | * Kevin Wolf <mail@kevin-wolf.de> | |
7 | * Paul Mackerras <paulus@samba.org> | |
8 | * | |
9 | * Description: | |
10 | * Functions relating to running KVM on Book 3S processors where | |
11 | * we don't have access to hypervisor mode, and we run the guest | |
12 | * in problem state (user mode). | |
13 | * | |
14 | * This file is derived from arch/powerpc/kvm/44x.c, | |
15 | * by Hollis Blanchard <hollisb@us.ibm.com>. | |
16 | * | |
17 | * This program is free software; you can redistribute it and/or modify | |
18 | * it under the terms of the GNU General Public License, version 2, as | |
19 | * published by the Free Software Foundation. | |
20 | */ | |
21 | ||
22 | #include <linux/kvm_host.h> | |
93087948 | 23 | #include <linux/export.h> |
f05ed4d5 PM |
24 | #include <linux/err.h> |
25 | #include <linux/slab.h> | |
26 | ||
27 | #include <asm/reg.h> | |
28 | #include <asm/cputable.h> | |
29 | #include <asm/cacheflush.h> | |
30 | #include <asm/tlbflush.h> | |
31 | #include <asm/uaccess.h> | |
32 | #include <asm/io.h> | |
33 | #include <asm/kvm_ppc.h> | |
34 | #include <asm/kvm_book3s.h> | |
35 | #include <asm/mmu_context.h> | |
95327d08 | 36 | #include <asm/switch_to.h> |
a413f474 | 37 | #include <asm/firmware.h> |
deb26c27 | 38 | #include <asm/hvcall.h> |
f05ed4d5 PM |
39 | #include <linux/gfp.h> |
40 | #include <linux/sched.h> | |
41 | #include <linux/vmalloc.h> | |
42 | #include <linux/highmem.h> | |
2ba9f0d8 | 43 | #include <linux/module.h> |
398a76c6 | 44 | #include <linux/miscdevice.h> |
f05ed4d5 | 45 | |
3a167bea | 46 | #include "book3s.h" |
72c12535 AK |
47 | |
48 | #define CREATE_TRACE_POINTS | |
49 | #include "trace_pr.h" | |
f05ed4d5 PM |
50 | |
51 | /* #define EXIT_DEBUG */ | |
52 | /* #define DEBUG_EXT */ | |
53 | ||
54 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |
55 | ulong msr); | |
616dff86 | 56 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); |
f05ed4d5 PM |
57 | |
58 | /* Some compatibility defines */ | |
59 | #ifdef CONFIG_PPC_BOOK3S_32 | |
60 | #define MSR_USER32 MSR_USER | |
61 | #define MSR_USER64 MSR_USER | |
62 | #define HW_PAGE_SIZE PAGE_SIZE | |
63 | #endif | |
64 | ||
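/*
 * "Split real mode" is the state where the guest runs with MSR_DR set
 * but MSR_IR clear: data accesses are translated while instruction
 * fetches still go to real addresses.
 */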
c01e3f66 AG |
65 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) |
66 | { | |
67 | ulong msr = kvmppc_get_msr(vcpu); | |
68 | return (msr & (MSR_IR|MSR_DR)) == MSR_DR; | |
69 | } | |
70 | ||
71 | static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) | |
72 | { | |
73 | ulong msr = kvmppc_get_msr(vcpu); | |
74 | ulong pc = kvmppc_get_pc(vcpu); | |
75 | ||
76 | /* Nothing to do unless we are in DR-only split real mode */ | |
77 | if ((msr & (MSR_IR|MSR_DR)) != MSR_DR) | |
78 | return; | |
79 | ||
80 | /* Nothing to do if we have already fixed up the guest */ | |
81 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) | |
82 | return; | |
83 | ||
84 | /* Only fix up code that lies in the fixupable address space */ | |
85 | if (pc & SPLIT_HACK_MASK) | |
86 | return; | |
87 | ||
88 | vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; | |
89 | kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); | |
90 | } | |
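/*
 * The PC offset applied here is stripped again by
 * kvmppc_unfixup_split_real() when the guest leaves split real mode
 * or the vcpu is unloaded.
 */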
91 | ||
92 | void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu); | |
93 | ||
3a167bea | 94 | static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) |
f05ed4d5 PM |
95 | { |
96 | #ifdef CONFIG_PPC_BOOK3S_64 | |
468a12c2 AG |
97 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
98 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | |
468a12c2 | 99 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
40fdd8c8 | 100 | svcpu->in_use = 0; |
468a12c2 | 101 | svcpu_put(svcpu); |
f05ed4d5 | 102 | #endif |
fb4188ba AG |
103 | |
104 | /* Disable AIL if supported */ | |
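/* (with AIL set, interrupts would be delivered at the virtual-mode
 * vectors and bypass the real-mode KVM entry code) */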
105 | if (cpu_has_feature(CPU_FTR_HVMODE) && | |
106 | cpu_has_feature(CPU_FTR_ARCH_207S)) | |
107 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL); | |
108 | ||
a47d72f3 | 109 | vcpu->cpu = smp_processor_id(); |
f05ed4d5 | 110 | #ifdef CONFIG_PPC_BOOK3S_32 |
3ff95502 | 111 | current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu; |
f05ed4d5 | 112 | #endif |
c01e3f66 AG |
113 | |
114 | if (kvmppc_is_split_real(vcpu)) | |
115 | kvmppc_fixup_split_real(vcpu); | |
f05ed4d5 PM |
116 | } |
117 | ||
3a167bea | 118 | static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
119 | { |
120 | #ifdef CONFIG_PPC_BOOK3S_64 | |
468a12c2 | 121 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
40fdd8c8 AG |
122 | if (svcpu->in_use) { |
123 | kvmppc_copy_from_svcpu(vcpu, svcpu); | |
124 | } | |
468a12c2 | 125 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
468a12c2 AG |
126 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
127 | svcpu_put(svcpu); | |
f05ed4d5 PM |
128 | #endif |
129 | ||
c01e3f66 AG |
130 | if (kvmppc_is_split_real(vcpu)) |
131 | kvmppc_unfixup_split_real(vcpu); | |
132 | ||
28c483b6 | 133 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
e14e7a1e | 134 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); |
fb4188ba AG |
135 | |
136 | /* Enable AIL if supported */ | |
137 | if (cpu_has_feature(CPU_FTR_HVMODE) && | |
138 | cpu_has_feature(CPU_FTR_ARCH_207S)) | |
139 | mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3); | |
140 | ||
a47d72f3 | 141 | vcpu->cpu = -1; |
f05ed4d5 PM |
142 | } |
143 | ||
a2d56020 PM |
144 | /* Copy data needed by real-mode code from vcpu to shadow vcpu */ |
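/* (the shadow vcpu carries only r0-r13 and the few special registers
 * the real-mode interrupt code actually touches) */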
145 | void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |
146 | struct kvm_vcpu *vcpu) | |
147 | { | |
148 | svcpu->gpr[0] = vcpu->arch.gpr[0]; | |
149 | svcpu->gpr[1] = vcpu->arch.gpr[1]; | |
150 | svcpu->gpr[2] = vcpu->arch.gpr[2]; | |
151 | svcpu->gpr[3] = vcpu->arch.gpr[3]; | |
152 | svcpu->gpr[4] = vcpu->arch.gpr[4]; | |
153 | svcpu->gpr[5] = vcpu->arch.gpr[5]; | |
154 | svcpu->gpr[6] = vcpu->arch.gpr[6]; | |
155 | svcpu->gpr[7] = vcpu->arch.gpr[7]; | |
156 | svcpu->gpr[8] = vcpu->arch.gpr[8]; | |
157 | svcpu->gpr[9] = vcpu->arch.gpr[9]; | |
158 | svcpu->gpr[10] = vcpu->arch.gpr[10]; | |
159 | svcpu->gpr[11] = vcpu->arch.gpr[11]; | |
160 | svcpu->gpr[12] = vcpu->arch.gpr[12]; | |
161 | svcpu->gpr[13] = vcpu->arch.gpr[13]; | |
162 | svcpu->cr = vcpu->arch.cr; | |
163 | svcpu->xer = vcpu->arch.xer; | |
164 | svcpu->ctr = vcpu->arch.ctr; | |
165 | svcpu->lr = vcpu->arch.lr; | |
166 | svcpu->pc = vcpu->arch.pc; | |
616dff86 AG |
167 | #ifdef CONFIG_PPC_BOOK3S_64 |
168 | svcpu->shadow_fscr = vcpu->arch.shadow_fscr; | |
169 | #endif | |
3cd60e31 AK |
170 | /* |
171 | * Now also save the current time base value. We use this | |
172 | * to derive the guest PURR and SPURR values. | |
173 | */ | |
174 | vcpu->arch.entry_tb = get_tb(); | |
8f42ab27 | 175 | vcpu->arch.entry_vtb = get_vtb(); |
06da28e7 AK |
176 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
177 | vcpu->arch.entry_ic = mfspr(SPRN_IC); | |
40fdd8c8 | 178 | svcpu->in_use = true; |
a2d56020 PM |
179 | } |
180 | ||
181 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | |
182 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |
183 | struct kvmppc_book3s_shadow_vcpu *svcpu) | |
184 | { | |
40fdd8c8 AG |
185 | /* |
186 | * vcpu_put would just call us again because in_use hasn't | |
187 | * been updated yet. | |
188 | */ | |
189 | preempt_disable(); | |
190 | ||
191 | /* | |
192 | * Maybe we were already preempted and synced the svcpu from | |
193 | * our preempt notifiers. Don't bother touching this svcpu then. | |
194 | */ | |
195 | if (!svcpu->in_use) | |
196 | goto out; | |
197 | ||
a2d56020 PM |
198 | vcpu->arch.gpr[0] = svcpu->gpr[0]; |
199 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | |
200 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | |
201 | vcpu->arch.gpr[3] = svcpu->gpr[3]; | |
202 | vcpu->arch.gpr[4] = svcpu->gpr[4]; | |
203 | vcpu->arch.gpr[5] = svcpu->gpr[5]; | |
204 | vcpu->arch.gpr[6] = svcpu->gpr[6]; | |
205 | vcpu->arch.gpr[7] = svcpu->gpr[7]; | |
206 | vcpu->arch.gpr[8] = svcpu->gpr[8]; | |
207 | vcpu->arch.gpr[9] = svcpu->gpr[9]; | |
208 | vcpu->arch.gpr[10] = svcpu->gpr[10]; | |
209 | vcpu->arch.gpr[11] = svcpu->gpr[11]; | |
210 | vcpu->arch.gpr[12] = svcpu->gpr[12]; | |
211 | vcpu->arch.gpr[13] = svcpu->gpr[13]; | |
212 | vcpu->arch.cr = svcpu->cr; | |
213 | vcpu->arch.xer = svcpu->xer; | |
214 | vcpu->arch.ctr = svcpu->ctr; | |
215 | vcpu->arch.lr = svcpu->lr; | |
216 | vcpu->arch.pc = svcpu->pc; | |
217 | vcpu->arch.shadow_srr1 = svcpu->shadow_srr1; | |
218 | vcpu->arch.fault_dar = svcpu->fault_dar; | |
219 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | |
220 | vcpu->arch.last_inst = svcpu->last_inst; | |
616dff86 AG |
221 | #ifdef CONFIG_PPC_BOOK3S_64 |
222 | vcpu->arch.shadow_fscr = svcpu->shadow_fscr; | |
223 | #endif | |
3cd60e31 AK |
224 | /* |
225 | * Update PURR and SPURR from the time base delta on exit. | |
226 | */ | |
227 | vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb; | |
228 | vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb; | |
8f42ab27 | 229 | vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb; |
06da28e7 AK |
230 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
231 | vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic; | |
40fdd8c8 AG |
232 | svcpu->in_use = false; |
233 | ||
234 | out: | |
235 | preempt_enable(); | |
a2d56020 PM |
236 | } |
237 | ||
3a167bea | 238 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
03d25c5b | 239 | { |
7c973a2e AG |
240 | int r = 1; /* Indicate we want to get back into the guest */ |
241 | ||
9b0cb3c8 AG |
242 | /* We misuse TLB_FLUSH to indicate that we want to clear |
243 | all shadow cache entries */ | |
244 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) | |
245 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | |
7c973a2e AG |
246 | |
247 | return r; | |
03d25c5b AG |
248 | } |
249 | ||
9b0cb3c8 | 250 | /************* MMU Notifiers *************/ |
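/*
 * Host MMU notifier events are handled by flushing the affected
 * shadow PTEs; the pages get mapped again on the next guest fault.
 */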
491d6ecc PM |
251 | static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start, |
252 | unsigned long end) | |
253 | { | |
254 | long i; | |
255 | struct kvm_vcpu *vcpu; | |
256 | struct kvm_memslots *slots; | |
257 | struct kvm_memory_slot *memslot; | |
258 | ||
259 | slots = kvm_memslots(kvm); | |
260 | kvm_for_each_memslot(memslot, slots) { | |
261 | unsigned long hva_start, hva_end; | |
262 | gfn_t gfn, gfn_end; | |
263 | ||
264 | hva_start = max(start, memslot->userspace_addr); | |
265 | hva_end = min(end, memslot->userspace_addr + | |
266 | (memslot->npages << PAGE_SHIFT)); | |
267 | if (hva_start >= hva_end) | |
268 | continue; | |
269 | /* | |
270 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | |
271 | * {gfn, gfn+1, ..., gfn_end-1}. | |
272 | */ | |
273 | gfn = hva_to_gfn_memslot(hva_start, memslot); | |
274 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | |
275 | kvm_for_each_vcpu(i, vcpu, kvm) | |
276 | kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT, | |
277 | gfn_end << PAGE_SHIFT); | |
278 | } | |
279 | } | |
9b0cb3c8 | 280 | |
3a167bea | 281 | static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva) |
9b0cb3c8 AG |
282 | { |
283 | trace_kvm_unmap_hva(hva); | |
284 | ||
491d6ecc | 285 | do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); |
9b0cb3c8 AG |
286 | |
287 | return 0; | |
288 | } | |
289 | ||
3a167bea AK |
290 | static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start, |
291 | unsigned long end) | |
9b0cb3c8 | 292 | { |
491d6ecc | 293 | do_kvm_unmap_hva(kvm, start, end); |
9b0cb3c8 AG |
294 | |
295 | return 0; | |
296 | } | |
297 | ||
57128468 ALC |
298 | static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start, |
299 | unsigned long end) | |
9b0cb3c8 AG |
300 | { |
301 | /* XXX could be more clever ;) */ | |
302 | return 0; | |
303 | } | |
304 | ||
3a167bea | 305 | static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva) |
9b0cb3c8 AG |
306 | { |
307 | /* XXX could be more clever ;) */ | |
308 | return 0; | |
309 | } | |
310 | ||
3a167bea | 311 | static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte) |
9b0cb3c8 AG |
312 | { |
313 | /* The page will get remapped properly on its next fault */ | |
491d6ecc | 314 | do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE); |
9b0cb3c8 AG |
315 | } |
316 | ||
317 | /*****************************************/ | |
318 | ||
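/*
 * Compute the MSR the guest actually runs with in problem state: keep
 * the guest-controlled bits, force on what the host needs (ME, RI,
 * IR, DR, PR, EE), and pass through the external provider bits the
 * guest currently owns.
 */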
f05ed4d5 PM |
319 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) |
320 | { | |
5deb8e7a AG |
321 | ulong guest_msr = kvmppc_get_msr(vcpu); |
322 | ulong smsr = guest_msr; | |
f05ed4d5 PM |
323 | |
324 | /* Guest MSR values */ | |
e5ee5422 | 325 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE; |
f05ed4d5 PM |
326 | /* Process MSR values */ |
327 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; | |
328 | /* External provider (FPU/Altivec/VSX) bits the guest owns */ | |
5deb8e7a | 329 | smsr |= (guest_msr & vcpu->arch.guest_owned_ext); |
f05ed4d5 PM |
330 | /* 64-bit Process MSR values */ |
331 | #ifdef CONFIG_PPC_BOOK3S_64 | |
332 | smsr |= MSR_ISF | MSR_HV; | |
333 | #endif | |
334 | vcpu->arch.shadow_msr = smsr; | |
335 | } | |
336 | ||
3a167bea | 337 | static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) |
f05ed4d5 | 338 | { |
5deb8e7a | 339 | ulong old_msr = kvmppc_get_msr(vcpu); |
f05ed4d5 PM |
340 | |
341 | #ifdef EXIT_DEBUG | |
342 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | |
343 | #endif | |
344 | ||
345 | msr &= to_book3s(vcpu)->msr_mask; | |
5deb8e7a | 346 | kvmppc_set_msr_fast(vcpu, msr); |
f05ed4d5 PM |
347 | kvmppc_recalc_shadow_msr(vcpu); |
348 | ||
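/* MSR_POW with no pending exceptions means the guest naps until the next interrupt */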
349 | if (msr & MSR_POW) { | |
350 | if (!vcpu->arch.pending_exceptions) { | |
351 | kvm_vcpu_block(vcpu); | |
966cd0f3 | 352 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
f05ed4d5 PM |
353 | vcpu->stat.halt_wakeup++; |
354 | ||
355 | /* Unset POW bit after we woke up */ | |
356 | msr &= ~MSR_POW; | |
5deb8e7a | 357 | kvmppc_set_msr_fast(vcpu, msr); |
f05ed4d5 PM |
358 | } |
359 | } | |
360 | ||
c01e3f66 AG |
361 | if (kvmppc_is_split_real(vcpu)) |
362 | kvmppc_fixup_split_real(vcpu); | |
363 | else | |
364 | kvmppc_unfixup_split_real(vcpu); | |
365 | ||
5deb8e7a | 366 | if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) != |
f05ed4d5 PM |
367 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { |
368 | kvmppc_mmu_flush_segments(vcpu); | |
369 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | |
370 | ||
371 | /* Preload magic page segment when in kernel mode */ | |
372 | if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { | |
373 | struct kvm_vcpu_arch *a = &vcpu->arch; | |
374 | ||
375 | if (msr & MSR_DR) | |
376 | kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); | |
377 | else | |
378 | kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); | |
379 | } | |
380 | } | |
381 | ||
bbcc9c06 BH |
382 | /* |
383 | * When switching from 32 to 64-bit, we may have a stale 32-bit | |
384 | * magic page around; we need to flush it. Typically the 32-bit magic | |
385 | * page will be instantiated when calling into RTAS. Note: we | |
386 | * assume that such a transition only happens while in kernel mode, | |
387 | * i.e., we never transition from user 32-bit to kernel 64-bit with | |
388 | * a 32-bit magic page around. | |
389 | */ | |
390 | if (vcpu->arch.magic_page_pa && | |
391 | !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) { | |
392 | /* going from RTAS to normal kernel code */ | |
393 | kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa, | |
394 | ~0xFFFUL); | |
395 | } | |
396 | ||
f05ed4d5 | 397 | /* Preload FPU if it's enabled */ |
5deb8e7a | 398 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
f05ed4d5 PM |
399 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
400 | } | |
401 | ||
3a167bea | 402 | void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr) |
f05ed4d5 PM |
403 | { |
404 | u32 host_pvr; | |
405 | ||
406 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; | |
407 | vcpu->arch.pvr = pvr; | |
408 | #ifdef CONFIG_PPC_BOOK3S_64 | |
409 | if ((pvr >= 0x330000) && (pvr < 0x70330000)) { | |
410 | kvmppc_mmu_book3s_64_init(vcpu); | |
1022fc3d AG |
411 | if (!to_book3s(vcpu)->hior_explicit) |
412 | to_book3s(vcpu)->hior = 0xfff00000; | |
f05ed4d5 | 413 | to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; |
af8f38b3 | 414 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
f05ed4d5 PM |
415 | } else |
416 | #endif | |
417 | { | |
418 | kvmppc_mmu_book3s_32_init(vcpu); | |
1022fc3d AG |
419 | if (!to_book3s(vcpu)->hior_explicit) |
420 | to_book3s(vcpu)->hior = 0; | |
f05ed4d5 | 421 | to_book3s(vcpu)->msr_mask = 0xffffffffULL; |
af8f38b3 | 422 | vcpu->arch.cpu_type = KVM_CPU_3S_32; |
f05ed4d5 PM |
423 | } |
424 | ||
af8f38b3 AG |
425 | kvmppc_sanity_check(vcpu); |
426 | ||
f05ed4d5 PM |
427 | /* If we are at hypervisor level on a 970, we can tell the CPU to | |
428 | * treat DCBZ as a 32-byte store */ | |
429 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; | |
430 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && | |
431 | !strcmp(cur_cpu_spec->platform, "ppc970")) | |
432 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | |
433 | ||
434 | /* Cell performs badly if MSR_FEx are set. So let's hope nobody | |
435 | really needs them in a VM on Cell and force-disable them. */ | |
436 | if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be")) | |
437 | to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1); | |
438 | ||
a4a0f252 PM |
439 | /* |
440 | * If they're asking for POWER6 or later, set the flag | |
441 | * indicating that we can do multiple large page sizes | |
442 | * and 1TB segments. | |
443 | * Also set the flag that indicates that tlbie has the large | |
444 | * page bit in the RB operand instead of the instruction. | |
445 | */ | |
446 | switch (PVR_VER(pvr)) { | |
447 | case PVR_POWER6: | |
448 | case PVR_POWER7: | |
449 | case PVR_POWER7p: | |
450 | case PVR_POWER8: | |
451 | vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE | | |
452 | BOOK3S_HFLAG_NEW_TLBIE; | |
453 | break; | |
454 | } | |
455 | ||
f05ed4d5 PM |
456 | #ifdef CONFIG_PPC_BOOK3S_32 |
457 | /* 32-bit Book3S always has a 32-byte dcbz */ | |
458 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | |
459 | #endif | |
460 | ||
461 | /* On some CPUs we can execute paired single operations natively */ | |
462 | asm ( "mfpvr %0" : "=r"(host_pvr)); | |
463 | switch (host_pvr) { | |
464 | case 0x00080200: /* lonestar 2.0 */ | |
465 | case 0x00088202: /* lonestar 2.2 */ | |
466 | case 0x70000100: /* gekko 1.0 */ | |
467 | case 0x00080100: /* gekko 2.0 */ | |
468 | case 0x00083203: /* gekko 2.3a */ | |
469 | case 0x00083213: /* gekko 2.3b */ | |
470 | case 0x00083204: /* gekko 2.4 */ | |
471 | case 0x00083214: /* gekko 2.4e (8SE) - retail HW2 */ | |
472 | case 0x00087200: /* broadway */ | |
473 | vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS; | |
474 | /* Enable HID2.PSE - in case we need it later */ | |
475 | mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29)); | |
476 | } | |
477 | } | |
478 | ||
479 | /* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To | |
480 | * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to | |
481 | * emulate a 32-byte dcbz length. | |
482 | * | |
483 | * The Book3s_64 inventors also realized this case and implemented a special bit | |
484 | * in the HID5 register, which is a hypervisor resource. Thus we can't use it. | |
485 | * | |
486 | * My approach here is to patch the dcbz instruction on executing pages. | |
487 | */ | |
488 | static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |
489 | { | |
490 | struct page *hpage; | |
491 | u64 hpage_offset; | |
492 | u32 *page; | |
493 | int i; | |
494 | ||
495 | hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | |
32cad84f | 496 | if (is_error_page(hpage)) |
f05ed4d5 | 497 | return; |
f05ed4d5 PM |
498 | |
499 | hpage_offset = pte->raddr & ~PAGE_MASK; | |
500 | hpage_offset &= ~0xFFFULL; | |
501 | hpage_offset /= 4; | |
502 | ||
503 | get_page(hpage); | |
2480b208 | 504 | page = kmap_atomic(hpage); |
f05ed4d5 PM |
505 | |
506 | /* patch dcbz into a reserved instruction, so we trap */ | |
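/* (clearing bit 0x8 yields an unassigned opcode; the resulting program
 * interrupt is recognized in kvmppc_handle_exit_pr() by comparing the
 * faulting instruction against INS_DCBZ & 0xfffffff7) */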
507 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) | |
cd087eef AG |
508 | if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ) |
509 | page[i] &= cpu_to_be32(0xfffffff7); | |
f05ed4d5 | 510 | |
2480b208 | 511 | kunmap_atomic(page); |
f05ed4d5 PM |
512 | put_page(hpage); |
513 | } | |
514 | ||
89b68c96 | 515 | static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) |
f05ed4d5 PM |
516 | { |
517 | ulong mp_pa = vcpu->arch.magic_page_pa; | |
518 | ||
5deb8e7a | 519 | if (!(kvmppc_get_msr(vcpu) & MSR_SF)) |
bbcc9c06 BH |
520 | mp_pa = (uint32_t)mp_pa; |
521 | ||
89b68c96 AG |
522 | gpa &= ~0xFFFULL; |
523 | if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) { | |
f05ed4d5 PM |
524 | return 1; |
525 | } | |
526 | ||
89b68c96 | 527 | return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT); |
f05ed4d5 PM |
528 | } |
529 | ||
530 | int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
531 | ulong eaddr, int vec) | |
532 | { | |
533 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); | |
93b159b4 | 534 | bool iswrite = false; |
f05ed4d5 PM |
535 | int r = RESUME_GUEST; |
536 | int relocated; | |
537 | int page_found = 0; | |
538 | struct kvmppc_pte pte; | |
539 | bool is_mmio = false; | |
5deb8e7a AG |
540 | bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false; |
541 | bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false; | |
f05ed4d5 PM |
542 | u64 vsid; |
543 | ||
544 | relocated = data ? dr : ir; | |
93b159b4 PM |
545 | if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE)) |
546 | iswrite = true; | |
f05ed4d5 PM |
547 | |
548 | /* Resolve real address if translation turned on */ | |
549 | if (relocated) { | |
93b159b4 | 550 | page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite); |
f05ed4d5 PM |
551 | } else { |
552 | pte.may_execute = true; | |
553 | pte.may_read = true; | |
554 | pte.may_write = true; | |
555 | pte.raddr = eaddr & KVM_PAM; | |
556 | pte.eaddr = eaddr; | |
557 | pte.vpage = eaddr >> 12; | |
c9029c34 | 558 | pte.page_size = MMU_PAGE_64K; |
f05ed4d5 PM |
559 | } |
560 | ||
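/*
 * Tag the virtual page with a magic VSID so that real-mode, DR-only
 * and IR-only accesses each get a distinct shadow address space.
 */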
5deb8e7a | 561 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
f05ed4d5 PM |
562 | case 0: |
563 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); | |
564 | break; | |
565 | case MSR_DR: | |
c01e3f66 AG |
566 | if (!data && |
567 | (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && | |
568 | ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) | |
569 | pte.raddr &= ~SPLIT_HACK_MASK; | |
570 | /* fall through */ | |
f05ed4d5 PM |
571 | case MSR_IR: |
572 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); | |
573 | ||
5deb8e7a | 574 | if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR) |
f05ed4d5 PM |
575 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); |
576 | else | |
577 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); | |
578 | pte.vpage |= vsid; | |
579 | ||
580 | if (vsid == -1) | |
581 | page_found = -EINVAL; | |
582 | break; | |
583 | } | |
584 | ||
585 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && | |
586 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | |
587 | /* | |
588 | * If we do the dcbz hack, we have to NX on every execution, | |
589 | * so we can patch the executing code. This renders our guest | |
590 | * NX-less. | |
591 | */ | |
592 | pte.may_execute = !data; | |
593 | } | |
594 | ||
595 | if (page_found == -ENOENT) { | |
596 | /* Page not found in guest PTE entries */ | |
5deb8e7a AG |
597 | u64 ssrr1 = vcpu->arch.shadow_srr1; |
598 | u64 msr = kvmppc_get_msr(vcpu); | |
599 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | |
600 | kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr); | |
601 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | |
f05ed4d5 PM |
602 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
603 | } else if (page_found == -EPERM) { | |
604 | /* Storage protection */ | |
5deb8e7a AG |
605 | u32 dsisr = vcpu->arch.fault_dsisr; |
606 | u64 ssrr1 = vcpu->arch.shadow_srr1; | |
607 | u64 msr = kvmppc_get_msr(vcpu); | |
608 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); | |
609 | dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT; | |
610 | kvmppc_set_dsisr(vcpu, dsisr); | |
611 | kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL)); | |
f05ed4d5 PM |
612 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
613 | } else if (page_found == -EINVAL) { | |
614 | /* Page not found in guest SLB */ | |
5deb8e7a | 615 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
f05ed4d5 PM |
616 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
617 | } else if (!is_mmio && | |
89b68c96 | 618 | kvmppc_visible_gpa(vcpu, pte.raddr)) { |
93b159b4 PM |
619 | if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) { |
620 | /* | |
621 | * There is already a host HPTE there, presumably | |
622 | * a read-only one for a page the guest thinks | |
623 | * is writable, so get rid of it first. | |
624 | */ | |
625 | kvmppc_mmu_unmap_page(vcpu, &pte); | |
626 | } | |
f05ed4d5 | 627 | /* The guest's PTE is not mapped yet. Map on the host */ |
93b159b4 | 628 | kvmppc_mmu_map_page(vcpu, &pte, iswrite); |
f05ed4d5 PM |
629 | if (data) |
630 | vcpu->stat.sp_storage++; | |
631 | else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | |
93b159b4 | 632 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) |
f05ed4d5 PM |
633 | kvmppc_patch_dcbz(vcpu, &pte); |
634 | } else { | |
635 | /* MMIO */ | |
636 | vcpu->stat.mmio_exits++; | |
637 | vcpu->arch.paddr_accessed = pte.raddr; | |
6020c0f6 | 638 | vcpu->arch.vaddr_accessed = pte.eaddr; |
f05ed4d5 PM |
639 | r = kvmppc_emulate_mmio(run, vcpu); |
640 | if ( r == RESUME_HOST_NV ) | |
641 | r = RESUME_HOST; | |
642 | } | |
643 | ||
644 | return r; | |
645 | } | |
646 | ||
f05ed4d5 PM |
647 | /* Give up external provider (FPU, Altivec, VSX) */ |
648 | void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | |
649 | { | |
650 | struct thread_struct *t = ¤t->thread; | |
f05ed4d5 | 651 | |
28c483b6 PM |
652 | /* |
653 | * VSX instructions can access FP and vector registers, so if | |
654 | * we are giving up VSX, make sure we give up FP and VMX as well. | |
655 | */ | |
656 | if (msr & MSR_VSX) | |
657 | msr |= MSR_FP | MSR_VEC; | |
658 | ||
659 | msr &= vcpu->arch.guest_owned_ext; | |
660 | if (!msr) | |
f05ed4d5 PM |
661 | return; |
662 | ||
663 | #ifdef DEBUG_EXT | |
664 | printk(KERN_INFO "Giving up ext 0x%lx\n", msr); | |
665 | #endif | |
666 | ||
28c483b6 PM |
667 | if (msr & MSR_FP) { |
668 | /* | |
669 | * Note that on CPUs with VSX, giveup_fpu stores | |
670 | * both the traditional FP registers and the added VSX | |
de79f7b9 | 671 | * registers into thread.fp_state.fpr[]. |
28c483b6 | 672 | */ |
99dae3ba | 673 | if (t->regs->msr & MSR_FP) |
9d1ffdd8 | 674 | giveup_fpu(current); |
99dae3ba | 675 | t->fp_save_area = NULL; |
28c483b6 PM |
676 | } |
677 | ||
f05ed4d5 | 678 | #ifdef CONFIG_ALTIVEC |
28c483b6 | 679 | if (msr & MSR_VEC) { |
9d1ffdd8 PM |
680 | if (current->thread.regs->msr & MSR_VEC) |
681 | giveup_altivec(current); | |
99dae3ba | 682 | t->vr_save_area = NULL; |
f05ed4d5 | 683 | } |
28c483b6 | 684 | #endif |
f05ed4d5 | 685 | |
28c483b6 | 686 | vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX); |
f05ed4d5 PM |
687 | kvmppc_recalc_shadow_msr(vcpu); |
688 | } | |
689 | ||
616dff86 AG |
690 | /* Give up facility (TAR / EBB / DSCR) */ |
691 | static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac) | |
692 | { | |
693 | #ifdef CONFIG_PPC_BOOK3S_64 | |
694 | if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) { | |
695 | /* Facility not available to the guest, ignore giveup request */ | |
696 | return; | |
697 | } | |
e14e7a1e AG |
698 | |
699 | switch (fac) { | |
700 | case FSCR_TAR_LG: | |
701 | vcpu->arch.tar = mfspr(SPRN_TAR); | |
702 | mtspr(SPRN_TAR, current->thread.tar); | |
703 | vcpu->arch.shadow_fscr &= ~FSCR_TAR; | |
704 | break; | |
705 | } | |
616dff86 AG |
706 | #endif |
707 | } | |
708 | ||
f05ed4d5 PM |
709 | /* Handle external providers (FPU, Altivec, VSX) */ |
710 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |
711 | ulong msr) | |
712 | { | |
713 | struct thread_struct *t = ¤t->thread; | |
f05ed4d5 PM |
714 | |
715 | /* When we have paired singles, we emulate in software */ | |
716 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) | |
717 | return RESUME_GUEST; | |
718 | ||
5deb8e7a | 719 | if (!(kvmppc_get_msr(vcpu) & msr)) { |
f05ed4d5 PM |
720 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
721 | return RESUME_GUEST; | |
722 | } | |
723 | ||
28c483b6 PM |
724 | if (msr == MSR_VSX) { |
725 | /* No VSX? Give an illegal instruction interrupt */ | |
726 | #ifdef CONFIG_VSX | |
727 | if (!cpu_has_feature(CPU_FTR_VSX)) | |
728 | #endif | |
729 | { | |
730 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); | |
731 | return RESUME_GUEST; | |
732 | } | |
733 | ||
734 | /* | |
735 | * We have to load up all the FP and VMX registers before | |
736 | * we can let the guest use VSX instructions. | |
737 | */ | |
738 | msr = MSR_FP | MSR_VEC | MSR_VSX; | |
f05ed4d5 PM |
739 | } |
740 | ||
28c483b6 PM |
741 | /* See if we already own all the ext(s) needed */ |
742 | msr &= ~vcpu->arch.guest_owned_ext; | |
743 | if (!msr) | |
744 | return RESUME_GUEST; | |
745 | ||
f05ed4d5 PM |
746 | #ifdef DEBUG_EXT |
747 | printk(KERN_INFO "Loading up ext 0x%lx\n", msr); | |
748 | #endif | |
749 | ||
28c483b6 | 750 | if (msr & MSR_FP) { |
7562c4fd | 751 | preempt_disable(); |
09548fda | 752 | enable_kernel_fp(); |
99dae3ba PM |
753 | load_fp_state(&vcpu->arch.fp); |
754 | t->fp_save_area = &vcpu->arch.fp; | |
7562c4fd | 755 | preempt_enable(); |
28c483b6 PM |
756 | } |
757 | ||
758 | if (msr & MSR_VEC) { | |
f05ed4d5 | 759 | #ifdef CONFIG_ALTIVEC |
7562c4fd | 760 | preempt_disable(); |
09548fda | 761 | enable_kernel_altivec(); |
99dae3ba PM |
762 | load_vr_state(&vcpu->arch.vr); |
763 | t->vr_save_area = &vcpu->arch.vr; | |
7562c4fd | 764 | preempt_enable(); |
f05ed4d5 | 765 | #endif |
f05ed4d5 PM |
766 | } |
767 | ||
99dae3ba | 768 | t->regs->msr |= msr; |
f05ed4d5 | 769 | vcpu->arch.guest_owned_ext |= msr; |
f05ed4d5 PM |
770 | kvmppc_recalc_shadow_msr(vcpu); |
771 | ||
772 | return RESUME_GUEST; | |
773 | } | |
774 | ||
9d1ffdd8 PM |
775 | /* |
776 | * Kernel code using FP or VMX could have flushed guest state to | |
777 | * the thread_struct; if so, get it back now. | |
778 | */ | |
779 | static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) | |
780 | { | |
781 | unsigned long lost_ext; | |
782 | ||
783 | lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr; | |
784 | if (!lost_ext) | |
785 | return; | |
786 | ||
09548fda | 787 | if (lost_ext & MSR_FP) { |
7562c4fd | 788 | preempt_disable(); |
09548fda | 789 | enable_kernel_fp(); |
99dae3ba | 790 | load_fp_state(&vcpu->arch.fp); |
7562c4fd | 791 | preempt_enable(); |
09548fda | 792 | } |
f2481771 | 793 | #ifdef CONFIG_ALTIVEC |
09548fda | 794 | if (lost_ext & MSR_VEC) { |
7562c4fd | 795 | preempt_disable(); |
09548fda | 796 | enable_kernel_altivec(); |
99dae3ba | 797 | load_vr_state(&vcpu->arch.vr); |
7562c4fd | 798 | preempt_enable(); |
09548fda | 799 | } |
f2481771 | 800 | #endif |
9d1ffdd8 PM |
801 | current->thread.regs->msr |= lost_ext; |
802 | } | |
803 | ||
616dff86 AG |
804 | #ifdef CONFIG_PPC_BOOK3S_64 |
805 | ||
806 | static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac) | |
807 | { | |
808 | /* Inject the Interrupt Cause field and trigger a guest interrupt */ | |
809 | vcpu->arch.fscr &= ~(0xffULL << 56); | |
810 | vcpu->arch.fscr |= (fac << 56); | |
811 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL); | |
812 | } | |
813 | ||
814 | static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac) | |
815 | { | |
816 | enum emulation_result er = EMULATE_FAIL; | |
817 | ||
818 | if (!(kvmppc_get_msr(vcpu) & MSR_PR)) | |
819 | er = kvmppc_emulate_instruction(vcpu->run, vcpu); | |
820 | ||
821 | if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) { | |
822 | /* Couldn't emulate, trigger interrupt in guest */ | |
823 | kvmppc_trigger_fac_interrupt(vcpu, fac); | |
824 | } | |
825 | } | |
826 | ||
827 | /* Enable facilities (TAR, EBB, DSCR) for the guest */ | |
828 | static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac) | |
829 | { | |
9916d57e | 830 | bool guest_fac_enabled; |
616dff86 AG |
831 | BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S)); |
832 | ||
9916d57e AG |
833 | /* |
834 | * Not every facility is enabled by FSCR bits, check whether the | |
835 | * guest has this facility enabled at all. | |
836 | */ | |
837 | switch (fac) { | |
838 | case FSCR_TAR_LG: | |
839 | case FSCR_EBB_LG: | |
840 | guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac)); | |
841 | break; | |
842 | case FSCR_TM_LG: | |
843 | guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM; | |
844 | break; | |
845 | default: | |
846 | guest_fac_enabled = false; | |
847 | break; | |
848 | } | |
849 | ||
850 | if (!guest_fac_enabled) { | |
616dff86 AG |
851 | /* Facility not enabled by the guest */ |
852 | kvmppc_trigger_fac_interrupt(vcpu, fac); | |
853 | return RESUME_GUEST; | |
854 | } | |
855 | ||
856 | switch (fac) { | |
e14e7a1e AG |
857 | case FSCR_TAR_LG: |
858 | /* TAR switching isn't lazy in Linux yet */ | |
859 | current->thread.tar = mfspr(SPRN_TAR); | |
860 | mtspr(SPRN_TAR, vcpu->arch.tar); | |
861 | vcpu->arch.shadow_fscr |= FSCR_TAR; | |
862 | break; | |
616dff86 AG |
863 | default: |
864 | kvmppc_emulate_fac(vcpu, fac); | |
865 | break; | |
866 | } | |
867 | ||
868 | return RESUME_GUEST; | |
869 | } | |
8e6afa36 AG |
870 | |
871 | void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr) | |
872 | { | |
873 | if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) { | |
874 | /* TAR got dropped, drop it in shadow too */ | |
875 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | |
876 | } | |
877 | vcpu->arch.fscr = fscr; | |
878 | } | |
616dff86 AG |
879 | #endif |
880 | ||
3a167bea AK |
881 | int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, |
882 | unsigned int exit_nr) | |
f05ed4d5 PM |
883 | { |
884 | int r = RESUME_HOST; | |
7ee78855 | 885 | int s; |
f05ed4d5 PM |
886 | |
887 | vcpu->stat.sum_exits++; | |
888 | ||
889 | run->exit_reason = KVM_EXIT_UNKNOWN; | |
890 | run->ready_for_interrupt_injection = 1; | |
891 | ||
bd2be683 | 892 | /* We get here with MSR.EE=1 */ |
3b1d9d7d | 893 | |
97c95059 | 894 | trace_kvm_exit(exit_nr, vcpu); |
706fb730 | 895 | kvm_guest_exit(); |
c63ddcb4 | 896 | |
f05ed4d5 PM |
897 | switch (exit_nr) { |
898 | case BOOK3S_INTERRUPT_INST_STORAGE: | |
468a12c2 | 899 | { |
a2d56020 | 900 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
f05ed4d5 PM |
901 | vcpu->stat.pf_instruc++; |
902 | ||
c01e3f66 AG |
903 | if (kvmppc_is_split_real(vcpu)) |
904 | kvmppc_fixup_split_real(vcpu); | |
905 | ||
f05ed4d5 PM |
906 | #ifdef CONFIG_PPC_BOOK3S_32 |
907 | /* We mark segments as unused when invalidating them, so treat | |
908 | * the respective fault as a segment fault. */ | |
a2d56020 PM |
909 | { |
910 | struct kvmppc_book3s_shadow_vcpu *svcpu; | |
911 | u32 sr; | |
912 | ||
913 | svcpu = svcpu_get(vcpu); | |
914 | sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]; | |
468a12c2 | 915 | svcpu_put(svcpu); |
a2d56020 PM |
916 | if (sr == SR_INVALID) { |
917 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | |
918 | r = RESUME_GUEST; | |
919 | break; | |
920 | } | |
f05ed4d5 PM |
921 | } |
922 | #endif | |
923 | ||
924 | /* only care about PTEG not found errors, but leave NX alone */ | |
468a12c2 | 925 | if (shadow_srr1 & 0x40000000) { |
93b159b4 | 926 | int idx = srcu_read_lock(&vcpu->kvm->srcu); |
f05ed4d5 | 927 | r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); |
93b159b4 | 928 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
f05ed4d5 PM |
929 | vcpu->stat.sp_instruc++; |
930 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | |
931 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | |
932 | /* | |
933 | * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, | |
934 | * so we can't use the NX bit inside the guest. Let's cross our fingers, | |
935 | * that no guest that needs the dcbz hack does NX. | |
936 | */ | |
937 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | |
938 | r = RESUME_GUEST; | |
939 | } else { | |
5deb8e7a AG |
940 | u64 msr = kvmppc_get_msr(vcpu); |
941 | msr |= shadow_srr1 & 0x58000000; | |
942 | kvmppc_set_msr_fast(vcpu, msr); | |
f05ed4d5 PM |
943 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
944 | r = RESUME_GUEST; | |
945 | } | |
946 | break; | |
468a12c2 | 947 | } |
f05ed4d5 PM |
948 | case BOOK3S_INTERRUPT_DATA_STORAGE: |
949 | { | |
950 | ulong dar = kvmppc_get_fault_dar(vcpu); | |
a2d56020 | 951 | u32 fault_dsisr = vcpu->arch.fault_dsisr; |
f05ed4d5 PM |
952 | vcpu->stat.pf_storage++; |
953 | ||
954 | #ifdef CONFIG_PPC_BOOK3S_32 | |
955 | /* We mark segments as unused when invalidating them, so treat | |
956 | * the respective fault as a segment fault. */ | |
a2d56020 PM |
957 | { |
958 | struct kvmppc_book3s_shadow_vcpu *svcpu; | |
959 | u32 sr; | |
960 | ||
961 | svcpu = svcpu_get(vcpu); | |
962 | sr = svcpu->sr[dar >> SID_SHIFT]; | |
468a12c2 | 963 | svcpu_put(svcpu); |
a2d56020 PM |
964 | if (sr == SR_INVALID) { |
965 | kvmppc_mmu_map_segment(vcpu, dar); | |
966 | r = RESUME_GUEST; | |
967 | break; | |
968 | } | |
f05ed4d5 PM |
969 | } |
970 | #endif | |
971 | ||
93b159b4 PM |
972 | /* |
973 | * We need to handle missing shadow PTEs, and | |
974 | * protection faults due to us mapping a page read-only | |
975 | * when the guest thinks it is writable. | |
976 | */ | |
977 | if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) { | |
978 | int idx = srcu_read_lock(&vcpu->kvm->srcu); | |
f05ed4d5 | 979 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
93b159b4 | 980 | srcu_read_unlock(&vcpu->kvm->srcu, idx); |
f05ed4d5 | 981 | } else { |
5deb8e7a AG |
982 | kvmppc_set_dar(vcpu, dar); |
983 | kvmppc_set_dsisr(vcpu, fault_dsisr); | |
f05ed4d5 PM |
984 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
985 | r = RESUME_GUEST; | |
986 | } | |
987 | break; | |
988 | } | |
989 | case BOOK3S_INTERRUPT_DATA_SEGMENT: | |
990 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { | |
5deb8e7a | 991 | kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu)); |
f05ed4d5 PM |
992 | kvmppc_book3s_queue_irqprio(vcpu, |
993 | BOOK3S_INTERRUPT_DATA_SEGMENT); | |
994 | } | |
995 | r = RESUME_GUEST; | |
996 | break; | |
997 | case BOOK3S_INTERRUPT_INST_SEGMENT: | |
998 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) { | |
999 | kvmppc_book3s_queue_irqprio(vcpu, | |
1000 | BOOK3S_INTERRUPT_INST_SEGMENT); | |
1001 | } | |
1002 | r = RESUME_GUEST; | |
1003 | break; | |
1004 | /* We're good on these - the host merely wanted to get our attention */ | |
1005 | case BOOK3S_INTERRUPT_DECREMENTER: | |
4f225ae0 | 1006 | case BOOK3S_INTERRUPT_HV_DECREMENTER: |
40688909 | 1007 | case BOOK3S_INTERRUPT_DOORBELL: |
568fccc4 | 1008 | case BOOK3S_INTERRUPT_H_DOORBELL: |
f05ed4d5 PM |
1009 | vcpu->stat.dec_exits++; |
1010 | r = RESUME_GUEST; | |
1011 | break; | |
1012 | case BOOK3S_INTERRUPT_EXTERNAL: | |
4f225ae0 AG |
1013 | case BOOK3S_INTERRUPT_EXTERNAL_LEVEL: |
1014 | case BOOK3S_INTERRUPT_EXTERNAL_HV: | |
f05ed4d5 PM |
1015 | vcpu->stat.ext_intr_exits++; |
1016 | r = RESUME_GUEST; | |
1017 | break; | |
1018 | case BOOK3S_INTERRUPT_PERFMON: | |
1019 | r = RESUME_GUEST; | |
1020 | break; | |
1021 | case BOOK3S_INTERRUPT_PROGRAM: | |
4f225ae0 | 1022 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: |
f05ed4d5 PM |
1023 | { |
1024 | enum emulation_result er; | |
1025 | ulong flags; | |
51f04726 MC |
1026 | u32 last_inst; |
1027 | int emul; | |
f05ed4d5 PM |
1028 | |
1029 | program_interrupt: | |
a2d56020 | 1030 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; |
f05ed4d5 | 1031 | |
51f04726 MC |
1032 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); |
1033 | if (emul != EMULATE_DONE) { | |
1034 | r = RESUME_GUEST; | |
1035 | break; | |
1036 | } | |
1037 | ||
5deb8e7a | 1038 | if (kvmppc_get_msr(vcpu) & MSR_PR) { |
f05ed4d5 | 1039 | #ifdef EXIT_DEBUG |
51f04726 MC |
1040 | pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n", |
1041 | kvmppc_get_pc(vcpu), last_inst); | |
f05ed4d5 | 1042 | #endif |
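/* In problem state we only emulate a patched dcbz (see
 * kvmppc_patch_dcbz()); any other program interrupt goes straight
 * back to the guest. */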
51f04726 | 1043 | if ((last_inst & 0xff0007ff) != |
f05ed4d5 PM |
1044 | (INS_DCBZ & 0xfffffff7)) { |
1045 | kvmppc_core_queue_program(vcpu, flags); | |
1046 | r = RESUME_GUEST; | |
1047 | break; | |
1048 | } | |
1049 | } | |
1050 | ||
1051 | vcpu->stat.emulated_inst_exits++; | |
1052 | er = kvmppc_emulate_instruction(run, vcpu); | |
1053 | switch (er) { | |
1054 | case EMULATE_DONE: | |
1055 | r = RESUME_GUEST_NV; | |
1056 | break; | |
1057 | case EMULATE_AGAIN: | |
1058 | r = RESUME_GUEST; | |
1059 | break; | |
1060 | case EMULATE_FAIL: | |
1061 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | |
51f04726 | 1062 | __func__, kvmppc_get_pc(vcpu), last_inst); |
f05ed4d5 PM |
1063 | kvmppc_core_queue_program(vcpu, flags); |
1064 | r = RESUME_GUEST; | |
1065 | break; | |
1066 | case EMULATE_DO_MMIO: | |
1067 | run->exit_reason = KVM_EXIT_MMIO; | |
1068 | r = RESUME_HOST_NV; | |
1069 | break; | |
c402a3f4 | 1070 | case EMULATE_EXIT_USER: |
50c7bb80 AG |
1071 | r = RESUME_HOST_NV; |
1072 | break; | |
f05ed4d5 PM |
1073 | default: |
1074 | BUG(); | |
1075 | } | |
1076 | break; | |
1077 | } | |
1078 | case BOOK3S_INTERRUPT_SYSCALL: | |
51f04726 MC |
1079 | { |
1080 | u32 last_sc; | |
1081 | int emul; | |
1082 | ||
1083 | /* Get last sc for papr */ | |
1084 | if (vcpu->arch.papr_enabled) { | |
1085 | /* The sc instruction points SRR0 to the next instruction */ | |
1086 | emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc); | |
1087 | if (emul != EMULATE_DONE) { | |
1088 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4); | |
1089 | r = RESUME_GUEST; | |
1090 | break; | |
1091 | } | |
1092 | } | |
1093 | ||
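/* 0x44000022 is "sc 1", the hypercall form of the system call instruction */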
a668f2bd | 1094 | if (vcpu->arch.papr_enabled && |
51f04726 | 1095 | (last_sc == 0x44000022) && |
5deb8e7a | 1096 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
a668f2bd AG |
1097 | /* SC 1 papr hypercalls */ |
1098 | ulong cmd = kvmppc_get_gpr(vcpu, 3); | |
1099 | int i; | |
1100 | ||
2ba9f0d8 | 1101 | #ifdef CONFIG_PPC_BOOK3S_64 |
a668f2bd AG |
1102 | if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) { |
1103 | r = RESUME_GUEST; | |
1104 | break; | |
1105 | } | |
96f38d72 | 1106 | #endif |
a668f2bd AG |
1107 | |
1108 | run->papr_hcall.nr = cmd; | |
1109 | for (i = 0; i < 9; ++i) { | |
1110 | ulong gpr = kvmppc_get_gpr(vcpu, 4 + i); | |
1111 | run->papr_hcall.args[i] = gpr; | |
1112 | } | |
1113 | run->exit_reason = KVM_EXIT_PAPR_HCALL; | |
1114 | vcpu->arch.hcall_needed = 1; | |
1115 | r = RESUME_HOST; | |
1116 | } else if (vcpu->arch.osi_enabled && | |
f05ed4d5 PM |
1117 | (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && |
1118 | (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { | |
1119 | /* MOL hypercalls */ | |
1120 | u64 *gprs = run->osi.gprs; | |
1121 | int i; | |
1122 | ||
1123 | run->exit_reason = KVM_EXIT_OSI; | |
1124 | for (i = 0; i < 32; i++) | |
1125 | gprs[i] = kvmppc_get_gpr(vcpu, i); | |
1126 | vcpu->arch.osi_needed = 1; | |
1127 | r = RESUME_HOST_NV; | |
5deb8e7a | 1128 | } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && |
f05ed4d5 PM |
1129 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { |
1130 | /* KVM PV hypercalls */ | |
1131 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | |
1132 | r = RESUME_GUEST; | |
1133 | } else { | |
1134 | /* Guest syscalls */ | |
1135 | vcpu->stat.syscall_exits++; | |
1136 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | |
1137 | r = RESUME_GUEST; | |
1138 | } | |
1139 | break; | |
51f04726 | 1140 | } |
f05ed4d5 PM |
1141 | case BOOK3S_INTERRUPT_FP_UNAVAIL: |
1142 | case BOOK3S_INTERRUPT_ALTIVEC: | |
1143 | case BOOK3S_INTERRUPT_VSX: | |
1144 | { | |
1145 | int ext_msr = 0; | |
9a26af64 | 1146 | int emul; |
9a26af64 MC |
1147 | u32 last_inst; |
1148 | ||
1149 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) { | |
1150 | /* Do paired single instruction emulation */ | |
51f04726 MC |
1151 | emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, |
1152 | &last_inst); | |
9a26af64 MC |
1153 | if (emul == EMULATE_DONE) |
1154 | goto program_interrupt; | |
1155 | else | |
1156 | r = RESUME_GUEST; | |
f05ed4d5 | 1157 | |
9a26af64 | 1158 | break; |
f05ed4d5 PM |
1159 | } |
1160 | ||
9a26af64 MC |
1161 | /* Enable external provider */ |
1162 | switch (exit_nr) { | |
1163 | case BOOK3S_INTERRUPT_FP_UNAVAIL: | |
1164 | ext_msr = MSR_FP; | |
f05ed4d5 | 1165 | break; |
9a26af64 MC |
1166 | |
1167 | case BOOK3S_INTERRUPT_ALTIVEC: | |
1168 | ext_msr = MSR_VEC; | |
f05ed4d5 | 1169 | break; |
9a26af64 MC |
1170 | |
1171 | case BOOK3S_INTERRUPT_VSX: | |
1172 | ext_msr = MSR_VSX; | |
f05ed4d5 PM |
1173 | break; |
1174 | } | |
9a26af64 MC |
1175 | |
1176 | r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr); | |
f05ed4d5 PM |
1177 | break; |
1178 | } | |
1179 | case BOOK3S_INTERRUPT_ALIGNMENT: | |
9a26af64 | 1180 | { |
51f04726 MC |
1181 | u32 last_inst; |
1182 | int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst); | |
9a26af64 MC |
1183 | |
1184 | if (emul == EMULATE_DONE) { | |
5deb8e7a AG |
1185 | u32 dsisr; |
1186 | u64 dar; | |
1187 | ||
1188 | dsisr = kvmppc_alignment_dsisr(vcpu, last_inst); | |
1189 | dar = kvmppc_alignment_dar(vcpu, last_inst); | |
1190 | ||
1191 | kvmppc_set_dsisr(vcpu, dsisr); | |
1192 | kvmppc_set_dar(vcpu, dar); | |
1193 | ||
f05ed4d5 PM |
1194 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
1195 | } | |
1196 | r = RESUME_GUEST; | |
1197 | break; | |
9a26af64 | 1198 | } |
616dff86 AG |
1199 | #ifdef CONFIG_PPC_BOOK3S_64 |
1200 | case BOOK3S_INTERRUPT_FAC_UNAVAIL: | |
1201 | kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56); | |
1202 | r = RESUME_GUEST; | |
1203 | break; | |
1204 | #endif | |
f05ed4d5 PM |
1205 | case BOOK3S_INTERRUPT_MACHINE_CHECK: |
1206 | case BOOK3S_INTERRUPT_TRACE: | |
1207 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | |
1208 | r = RESUME_GUEST; | |
1209 | break; | |
1210 | default: | |
468a12c2 | 1211 | { |
a2d56020 | 1212 | ulong shadow_srr1 = vcpu->arch.shadow_srr1; |
f05ed4d5 PM |
1213 | /* Ugh - bork here! What did we get? */ |
1214 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", | |
468a12c2 | 1215 | exit_nr, kvmppc_get_pc(vcpu), shadow_srr1); |
f05ed4d5 PM |
1216 | r = RESUME_HOST; |
1217 | BUG(); | |
1218 | break; | |
1219 | } | |
468a12c2 | 1220 | } |
f05ed4d5 PM |
1221 | |
1222 | if (!(r & RESUME_HOST)) { | |
1223 | /* To avoid clobbering exit_reason, only check for signals if | |
1224 | * we aren't already exiting to userspace for some other | |
1225 | * reason. */ | |
e371f713 AG |
1226 | |
1227 | /* | |
1228 | * Interrupts could be timers for the guest that we have to | |
1229 | * inject again, so let's postpone them until we're in the guest; | |
1230 | * if we really did time things that badly, we just exit again | |
1231 | * due to a host external interrupt. | |
1232 | */ | |
7ee78855 | 1233 | s = kvmppc_prepare_to_enter(vcpu); |
6c85f52b | 1234 | if (s <= 0) |
7ee78855 | 1235 | r = s; |
6c85f52b SW |
1236 | else { |
1237 | /* interrupts now hard-disabled */ | |
5f1c248f | 1238 | kvmppc_fix_ee_before_entry(); |
f05ed4d5 | 1239 | } |
6c85f52b | 1240 | |
9d1ffdd8 | 1241 | kvmppc_handle_lost_ext(vcpu); |
f05ed4d5 PM |
1242 | } |
1243 | ||
1244 | trace_kvm_book3s_reenter(r, vcpu); | |
1245 | ||
1246 | return r; | |
1247 | } | |
1248 | ||
3a167bea AK |
1249 | static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu, |
1250 | struct kvm_sregs *sregs) | |
f05ed4d5 PM |
1251 | { |
1252 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | |
1253 | int i; | |
1254 | ||
1255 | sregs->pvr = vcpu->arch.pvr; | |
1256 | ||
1257 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; | |
1258 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | |
1259 | for (i = 0; i < 64; i++) { | |
1260 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i; | |
1261 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; | |
1262 | } | |
1263 | } else { | |
1264 | for (i = 0; i < 16; i++) | |
5deb8e7a | 1265 | sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i); |
f05ed4d5 PM |
1266 | |
1267 | for (i = 0; i < 8; i++) { | |
1268 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | |
1269 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; | |
1270 | } | |
1271 | } | |
1272 | ||
1273 | return 0; | |
1274 | } | |
1275 | ||
3a167bea AK |
1276 | static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu, |
1277 | struct kvm_sregs *sregs) | |
f05ed4d5 PM |
1278 | { |
1279 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | |
1280 | int i; | |
1281 | ||
3a167bea | 1282 | kvmppc_set_pvr_pr(vcpu, sregs->pvr); |
f05ed4d5 PM |
1283 | |
1284 | vcpu3s->sdr1 = sregs->u.s.sdr1; | |
1285 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | |
1286 | for (i = 0; i < 64; i++) { | |
1287 | vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, | |
1288 | sregs->u.s.ppc64.slb[i].slbe); | |
1289 | } | |
1290 | } else { | |
1291 | for (i = 0; i < 16; i++) { | |
1292 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); | |
1293 | } | |
1294 | for (i = 0; i < 8; i++) { | |
1295 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, | |
1296 | (u32)sregs->u.s.ppc32.ibat[i]); | |
1297 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, | |
1298 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); | |
1299 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, | |
1300 | (u32)sregs->u.s.ppc32.dbat[i]); | |
1301 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, | |
1302 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); | |
1303 | } | |
1304 | } | |
1305 | ||
1306 | /* Flush the MMU after messing with the segments */ | |
1307 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | |
1308 | ||
1309 | return 0; | |
1310 | } | |
1311 | ||
3a167bea AK |
1312 | static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1313 | union kvmppc_one_reg *val) | |
31f3438e | 1314 | { |
a136a8bd | 1315 | int r = 0; |
31f3438e | 1316 | |
a136a8bd | 1317 | switch (id) { |
a59c1d9e MS |
1318 | case KVM_REG_PPC_DEBUG_INST: |
1319 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); | |
1320 | break; | |
31f3438e | 1321 | case KVM_REG_PPC_HIOR: |
a136a8bd | 1322 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
31f3438e | 1323 | break; |
e5ee5422 | 1324 | case KVM_REG_PPC_LPCR: |
a0840240 | 1325 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1326 | /* |
1327 | * We are only interested in the LPCR_ILE bit | |
1328 | */ | |
1329 | if (vcpu->arch.intr_msr & MSR_LE) | |
1330 | *val = get_reg_val(id, LPCR_ILE); | |
1331 | else | |
1332 | *val = get_reg_val(id, 0); | |
1333 | break; | |
31f3438e | 1334 | default: |
a136a8bd | 1335 | r = -EINVAL; |
31f3438e PM |
1336 | break; |
1337 | } | |
1338 | ||
1339 | return r; | |
1340 | } | |
1341 | ||
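/*
 * ILE is the only LPCR bit PR KVM honours; it selects the endianness
 * the guest takes interrupts in (mirrored in intr_msr as MSR_LE).
 */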
e5ee5422 AK |
1342 | static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
1343 | { | |
1344 | if (new_lpcr & LPCR_ILE) | |
1345 | vcpu->arch.intr_msr |= MSR_LE; | |
1346 | else | |
1347 | vcpu->arch.intr_msr &= ~MSR_LE; | |
1348 | } | |
1349 | ||
3a167bea AK |
1350 | static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
1351 | union kvmppc_one_reg *val) | |
31f3438e | 1352 | { |
a136a8bd | 1353 | int r = 0; |
31f3438e | 1354 | |
a136a8bd | 1355 | switch (id) { |
31f3438e | 1356 | case KVM_REG_PPC_HIOR: |
a136a8bd PM |
1357 | to_book3s(vcpu)->hior = set_reg_val(id, *val); |
1358 | to_book3s(vcpu)->hior_explicit = true; | |
31f3438e | 1359 | break; |
e5ee5422 | 1360 | case KVM_REG_PPC_LPCR: |
a0840240 | 1361 | case KVM_REG_PPC_LPCR_64: |
e5ee5422 AK |
1362 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
1363 | break; | |
31f3438e | 1364 | default: |
a136a8bd | 1365 | r = -EINVAL; |
31f3438e PM |
1366 | break; |
1367 | } | |
1368 | ||
1369 | return r; | |
1370 | } | |
1371 | ||
3a167bea AK |
1372 | static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm, |
1373 | unsigned int id) | |
f05ed4d5 PM |
1374 | { |
1375 | struct kvmppc_vcpu_book3s *vcpu_book3s; | |
1376 | struct kvm_vcpu *vcpu; | |
1377 | int err = -ENOMEM; | |
1378 | unsigned long p; | |
1379 | ||
3ff95502 PM |
1380 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
1381 | if (!vcpu) | |
f05ed4d5 PM |
1382 | goto out; |
1383 | ||
f05ed4d5 PM |
1384 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1385 | if (!vcpu_book3s) | |
f05ed4d5 | 1386 | goto free_vcpu; |
3ff95502 | 1387 | vcpu->arch.book3s = vcpu_book3s; |
f05ed4d5 | 1388 | |
ab78475c | 1389 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1390 | vcpu->arch.shadow_vcpu = |
1391 | kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL); | |
1392 | if (!vcpu->arch.shadow_vcpu) | |
1393 | goto free_vcpu3s; | |
a2d56020 | 1394 | #endif |
f05ed4d5 | 1395 | |
f05ed4d5 PM |
1396 | err = kvm_vcpu_init(vcpu, kvm, id); |
1397 | if (err) | |
1398 | goto free_shadow_vcpu; | |
1399 | ||
7c7b406e | 1400 | err = -ENOMEM; |
f05ed4d5 | 1401 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
f05ed4d5 PM |
1402 | if (!p) |
1403 | goto uninit_vcpu; | |
89b68c96 | 1404 | vcpu->arch.shared = (void *)p; |
f05ed4d5 | 1405 | #ifdef CONFIG_PPC_BOOK3S_64 |
5deb8e7a AG |
1406 | /* Always start the shared struct in native endian mode */ |
1407 | #ifdef __BIG_ENDIAN__ | |
1408 | vcpu->arch.shared_big_endian = true; | |
1409 | #else | |
1410 | vcpu->arch.shared_big_endian = false; | |
1411 | #endif | |
1412 | ||
a4a0f252 PM |
1413 | /* |
1414 | * Default to the same as the host if we're on sufficiently | |
1415 | * recent machine that we have 1TB segments; | |
1416 | * otherwise default to PPC970FX. | |
1417 | */ | |
f05ed4d5 | 1418 | vcpu->arch.pvr = 0x3C0301; |
a4a0f252 PM |
1419 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
1420 | vcpu->arch.pvr = mfspr(SPRN_PVR); | |
e5ee5422 | 1421 | vcpu->arch.intr_msr = MSR_SF; |
f05ed4d5 PM |
1422 | #else |
1423 | /* default to book3s_32 (750) */ | |
1424 | vcpu->arch.pvr = 0x84202; | |
1425 | #endif | |
3a167bea | 1426 | kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr); |
f05ed4d5 PM |
1427 | vcpu->arch.slb_nr = 64; |
1428 | ||
94810ba4 | 1429 | vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; |
f05ed4d5 PM |
1430 | |
1431 | err = kvmppc_mmu_init(vcpu); | |
1432 | if (err < 0) | |
1433 | goto uninit_vcpu; | |
1434 | ||
1435 | return vcpu; | |
1436 | ||
1437 | uninit_vcpu: | |
1438 | kvm_vcpu_uninit(vcpu); | |
1439 | free_shadow_vcpu: | |
ab78475c | 1440 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1441 | kfree(vcpu->arch.shadow_vcpu); |
1442 | free_vcpu3s: | |
a2d56020 | 1443 | #endif |
f05ed4d5 | 1444 | vfree(vcpu_book3s); |
3ff95502 PM |
1445 | free_vcpu: |
1446 | kmem_cache_free(kvm_vcpu_cache, vcpu); | |
f05ed4d5 PM |
1447 | out: |
1448 | return ERR_PTR(err); | |
1449 | } | |
1450 | ||
3a167bea | 1451 | static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1452 | { |
1453 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | |
1454 | ||
1455 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); | |
1456 | kvm_vcpu_uninit(vcpu); | |
ab78475c | 1457 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
3ff95502 PM |
1458 | kfree(vcpu->arch.shadow_vcpu); |
1459 | #endif | |
f05ed4d5 | 1460 | vfree(vcpu_book3s); |
3ff95502 | 1461 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
f05ed4d5 PM |
1462 | } |
1463 | ||
3a167bea | 1464 | static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1465 | { |
1466 | int ret; | |
f05ed4d5 | 1467 | #ifdef CONFIG_ALTIVEC |
f05ed4d5 | 1468 | unsigned long uninitialized_var(vrsave); |
f05ed4d5 | 1469 | #endif |
f05ed4d5 | 1470 | |
af8f38b3 AG |
1471 | /* Check if we can run the vcpu at all */ |
1472 | if (!vcpu->arch.sane) { | |
1473 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
7d82714d AG |
1474 | ret = -EINVAL; |
1475 | goto out; | |
af8f38b3 AG |
1476 | } |
1477 | ||
e371f713 AG |
1478 | /* |
1479 | * Interrupts could be timers for the guest that we have to inject | |
1480 | * again, so let's postpone them until we're in the guest; if we | |
1481 | * really did time things that badly, we just exit again due to | |
1482 | * a host external interrupt. | |
1483 | */ | |
7ee78855 | 1484 | ret = kvmppc_prepare_to_enter(vcpu); |
6c85f52b | 1485 | if (ret <= 0) |
7d82714d | 1486 | goto out; |
6c85f52b | 1487 | /* interrupts now hard-disabled */ |
f05ed4d5 | 1488 | |
99dae3ba | 1489 | /* Save FPU state in thread_struct */ |
f05ed4d5 PM |
1490 | if (current->thread.regs->msr & MSR_FP) |
1491 | giveup_fpu(current); | |
f05ed4d5 PM |
1492 | |
1493 | #ifdef CONFIG_ALTIVEC | |
99dae3ba PM |
1494 | /* Save Altivec state in thread_struct */ |
1495 | if (current->thread.regs->msr & MSR_VEC) | |
1496 | giveup_altivec(current); | |
f05ed4d5 PM |
1497 | #endif |
1498 | ||
1499 | #ifdef CONFIG_VSX | |
99dae3ba PM |
1500 | /* Save VSX state in thread_struct */ |
1501 | if (current->thread.regs->msr & MSR_VSX) | |
28c483b6 | 1502 | __giveup_vsx(current); |
f05ed4d5 PM |
1503 | #endif |
1504 | ||
f05ed4d5 | 1505 | /* Preload FPU if it's enabled */ |
5deb8e7a | 1506 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
f05ed4d5 PM |
1507 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1508 | ||
5f1c248f | 1509 | kvmppc_fix_ee_before_entry(); |
df6909e5 PM |
1510 | |
1511 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | |
1512 | ||
24afa37b AG |
1513 | /* No need for kvm_guest_exit; it's done in handle_exit.
1514 |  * We also get here with interrupts enabled. */
f05ed4d5 | 1515 | |
f05ed4d5 | 1516 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
28c483b6 PM |
1517 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1518 | ||
e14e7a1e AG |
1519 | /* Make sure we save the guest TAR/EBB/DSCR state */ |
1520 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | |
1521 | ||
7d82714d | 1522 | out: |
0652eaae | 1523 | vcpu->mode = OUTSIDE_GUEST_MODE; |
f05ed4d5 PM |
1524 | return ret; |
1525 | } | |
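/*
 * For reference, a minimal sketch of the userspace side of this run
 * loop, using only the generic KVM ioctl ABI; the fd name is a
 * placeholder, and run points at the vcpu's mmap'ed kvm_run area:
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	static void run_loop(int vcpu_fd, struct kvm_run *run)
 *	{
 *		for (;;) {
 *			ioctl(vcpu_fd, KVM_RUN, 0);
 *			if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
 *				break;	// e.g. !vcpu->arch.sane above
 *			// handle MMIO, hcalls etc., then re-enter
 *		}
 *	}
 */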
1526 | ||
82ed3616 PM |
1527 | /* |
1528 | * Get (and clear) the dirty memory log for a memory slot. | |
1529 | */ | |
3a167bea AK |
1530 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1531 | struct kvm_dirty_log *log) | |
82ed3616 PM |
1532 | { |
1533 | struct kvm_memory_slot *memslot; | |
1534 | struct kvm_vcpu *vcpu; | |
1535 | ulong ga, ga_end; | |
1536 | int is_dirty = 0; | |
1537 | int r; | |
1538 | unsigned long n; | |
1539 | ||
1540 | mutex_lock(&kvm->slots_lock); | |
1541 | ||
1542 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | |
1543 | if (r) | |
1544 | goto out; | |
1545 | ||
1546 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1547 | if (is_dirty) { | |
1548 | memslot = id_to_memslot(kvm->memslots, log->slot); | |
1549 | ||
1550 | ga = memslot->base_gfn << PAGE_SHIFT; | |
1551 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | |
1552 | ||
1553 | kvm_for_each_vcpu(n, vcpu, kvm) | |
1554 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | |
1555 | ||
1556 | n = kvm_dirty_bitmap_bytes(memslot); | |
1557 | memset(memslot->dirty_bitmap, 0, n); | |
1558 | } | |
1559 | ||
1560 | r = 0; | |
1561 | out: | |
1562 | mutex_unlock(&kvm->slots_lock); | |
1563 | return r; | |
1564 | } | |
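/*
 * The matching userspace call, sketched on the assumption that the
 * caller sized the bitmap the way kvm_dirty_bitmap_bytes() does (one
 * bit per page, rounded up to a long); vm_fd is a placeholder:
 *
 *	#include <linux/kvm.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static int fetch_dirty_log(int vm_fd, __u32 slot, void *bitmap)
 *	{
 *		struct kvm_dirty_log log;
 *
 *		memset(&log, 0, sizeof(log));
 *		log.slot = slot;
 *		log.dirty_bitmap = bitmap;	// kernel fills this in
 *		return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *	}
 */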
1565 | ||
3a167bea AK |
1566 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1567 | struct kvm_memory_slot *memslot) | |
5b74716e | 1568 | { |
3a167bea AK |
1570 | } | |
5b74716e | 1571 | |
3a167bea AK |
1572 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, |
1573 | struct kvm_memory_slot *memslot, | |
1574 | struct kvm_userspace_memory_region *mem) | |
1575 | { | |
5b74716e BH |
1576 | return 0; |
1577 | } | |
5b74716e | 1578 | |
3a167bea AK |
1579 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
1580 | struct kvm_userspace_memory_region *mem, | |
1581 | const struct kvm_memory_slot *old) | |
a66b48c3 | 1582 | { |
a66b48c3 PM |
1584 | } |
1585 | ||
3a167bea AK |
1586 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free, |
1587 | struct kvm_memory_slot *dont) | |
a66b48c3 | 1588 | { |
a66b48c3 PM |
1590 | } |
1591 | ||
3a167bea AK |
1592 | static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, |
1593 | unsigned long npages) | |
f9e0554d PM |
1594 | { |
1595 | return 0; | |
1596 | } | |
1597 | ||
3a167bea | 1598 | |
5b74716e | 1599 | #ifdef CONFIG_PPC64 |
3a167bea AK |
1600 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1601 | struct kvm_ppc_smmu_info *info) | |
dfe49dbd | 1602 | { |
a4a0f252 PM |
1603 | long i;
1604 | struct kvm_vcpu *vcpu; | |
1605 | ||
1606 | info->flags = 0; | |
5b74716e BH |
1607 | |
1608 | /* SLB is always 64 entries */ | |
1609 | info->slb_size = 64; | |
1610 | ||
1611 | /* Standard 4k base page size segment */ | |
1612 | info->sps[0].page_shift = 12; | |
1613 | info->sps[0].slb_enc = 0; | |
1614 | info->sps[0].enc[0].page_shift = 12; | |
1615 | info->sps[0].enc[0].pte_enc = 0; | |
1616 | ||
a4a0f252 PM |
1617 | /* |
1618 | * 64k large page size. | |
1619 | * We only want to advertise this if the CPU we're emulating
1620 | * supports it, but unfortunately we don't have a vcpu handy
1621 | * here to check. Just pick the first vcpu, and if
1622 | * that doesn't exist yet, report the minimum capability, | |
1623 | * i.e., no 64k pages. | |
1624 | * 1T segment support goes along with 64k pages. | |
1625 | */ | |
1626 | i = 1; | |
1627 | vcpu = kvm_get_vcpu(kvm, 0); | |
1628 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | |
1629 | info->flags = KVM_PPC_1T_SEGMENTS; | |
1630 | info->sps[i].page_shift = 16; | |
1631 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; | |
1632 | info->sps[i].enc[0].page_shift = 16; | |
1633 | info->sps[i].enc[0].pte_enc = 1; | |
1634 | ++i; | |
1635 | } | |
1636 | ||
5b74716e | 1637 | /* Standard 16M large page size segment */ |
a4a0f252 PM |
1638 | info->sps[i].page_shift = 24; |
1639 | info->sps[i].slb_enc = SLB_VSID_L; | |
1640 | info->sps[i].enc[0].page_shift = 24; | |
1641 | info->sps[i].enc[0].pte_enc = 0; | |
dfe49dbd | 1642 | |
5b74716e BH |
1643 | return 0; |
1644 | } | |
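/*
 * Userspace reads this via the KVM_PPC_GET_SMMU_INFO vm ioctl; a
 * sketch, assuming unused sps[] slots read back with a zero
 * page_shift (vm_fd is a placeholder and use_seg_size() a
 * hypothetical consumer):
 *
 *	struct kvm_ppc_smmu_info info;
 *	int i;
 *
 *	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) == 0)
 *		for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++)
 *			if (info.sps[i].page_shift)
 *				use_seg_size(&info.sps[i]);
 */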
3a167bea AK |
1645 | #else |
1646 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |
1647 | struct kvm_ppc_smmu_info *info) | |
f9e0554d | 1648 | { |
3a167bea AK |
1649 | /* We should not get called */ |
1650 | BUG(); | |
f9e0554d | 1651 | } |
3a167bea | 1652 | #endif /* CONFIG_PPC64 */ |
f9e0554d | 1653 | |
a413f474 IM |
1654 | static unsigned int kvm_global_user_count;
1655 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); | |
1656 | ||
3a167bea | 1657 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
f9e0554d | 1658 | { |
9308ab8e | 1659 | mutex_init(&kvm->arch.hpt_mutex); |
f31e65e1 | 1660 | |
699a0ea0 PM |
1661 | #ifdef CONFIG_PPC_BOOK3S_64 |
1662 | /* Start out with the default set of hcalls enabled */ | |
1663 | kvmppc_pr_init_default_hcalls(kvm); | |
1664 | #endif | |
1665 | ||
a413f474 IM |
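	/*
	 * Relocation-on interrupts bypass the KVM hooks in the
	 * real-mode exception vectors, so PR guest exits would be
	 * missed; keep the feature disabled while any PR VM exists,
	 * refcounted so that VMs can come and go independently.
	 */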
1666 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
1667 | spin_lock(&kvm_global_user_count_lock); | |
1668 | if (++kvm_global_user_count == 1) | |
1669 | pSeries_disable_reloc_on_exc(); | |
1670 | spin_unlock(&kvm_global_user_count_lock); | |
1671 | } | |
f9e0554d PM |
1672 | return 0; |
1673 | } | |
1674 | ||
3a167bea | 1675 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
f9e0554d | 1676 | { |
f31e65e1 BH |
1677 | #ifdef CONFIG_PPC64 |
1678 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | |
1679 | #endif | |
a413f474 IM |
1680 | |
1681 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | |
1682 | spin_lock(&kvm_global_user_count_lock); | |
1683 | BUG_ON(kvm_global_user_count == 0); | |
1684 | if (--kvm_global_user_count == 0) | |
1685 | pSeries_enable_reloc_on_exc(); | |
1686 | spin_unlock(&kvm_global_user_count_lock); | |
1687 | } | |
f9e0554d PM |
1688 | } |
1689 | ||
3a167bea | 1690 | static int kvmppc_core_check_processor_compat_pr(void) |
f05ed4d5 | 1691 | { |
3a167bea AK |
1692 | /* we are always compatible */ |
1693 | return 0; | |
1694 | } | |
f05ed4d5 | 1695 | |
3a167bea AK |
1696 | static long kvm_arch_vm_ioctl_pr(struct file *filp, |
1697 | unsigned int ioctl, unsigned long arg) | |
1698 | { | |
1699 | return -ENOTTY; | |
1700 | } | |
f05ed4d5 | 1701 | |
cbbc58d4 | 1702 | static struct kvmppc_ops kvm_ops_pr = { |
3a167bea AK |
1703 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, |
1704 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, | |
1705 | .get_one_reg = kvmppc_get_one_reg_pr, | |
1706 | .set_one_reg = kvmppc_set_one_reg_pr, | |
1707 | .vcpu_load = kvmppc_core_vcpu_load_pr, | |
1708 | .vcpu_put = kvmppc_core_vcpu_put_pr, | |
1709 | .set_msr = kvmppc_set_msr_pr, | |
1710 | .vcpu_run = kvmppc_vcpu_run_pr, | |
1711 | .vcpu_create = kvmppc_core_vcpu_create_pr, | |
1712 | .vcpu_free = kvmppc_core_vcpu_free_pr, | |
1713 | .check_requests = kvmppc_core_check_requests_pr, | |
1714 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, | |
1715 | .flush_memslot = kvmppc_core_flush_memslot_pr, | |
1716 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, | |
1717 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, | |
1718 | .unmap_hva = kvm_unmap_hva_pr, | |
1719 | .unmap_hva_range = kvm_unmap_hva_range_pr, | |
1720 | .age_hva = kvm_age_hva_pr, | |
1721 | .test_age_hva = kvm_test_age_hva_pr, | |
1722 | .set_spte_hva = kvm_set_spte_hva_pr, | |
1723 | .mmu_destroy = kvmppc_mmu_destroy_pr, | |
1724 | .free_memslot = kvmppc_core_free_memslot_pr, | |
1725 | .create_memslot = kvmppc_core_create_memslot_pr, | |
1726 | .init_vm = kvmppc_core_init_vm_pr, | |
1727 | .destroy_vm = kvmppc_core_destroy_vm_pr, | |
3a167bea AK |
1728 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
1729 | .emulate_op = kvmppc_core_emulate_op_pr, | |
1730 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, | |
1731 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | |
1732 | .fast_vcpu_kick = kvm_vcpu_kick, | |
1733 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | |
ae2113a4 PM |
1734 | #ifdef CONFIG_PPC_BOOK3S_64 |
1735 | .hcall_implemented = kvmppc_hcall_impl_pr, | |
1736 | #endif | |
3a167bea AK |
1737 | }; |
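/*
 * Roughly how this table is consumed: kvmppc_pr_ops (set below) is
 * picked up at VM creation, and the generic book3s code then
 * dispatches through it, along the lines of:
 *
 *	int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 *	{
 *		return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
 *	}
 */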
1738 | ||
cbbc58d4 AK |
1739 | |
1740 | int kvmppc_book3s_init_pr(void) | |
f05ed4d5 PM |
1741 | { |
1742 | int r; | |
1743 | ||
cbbc58d4 AK |
1744 | r = kvmppc_core_check_processor_compat_pr(); |
1745 | if (r < 0) | |
f05ed4d5 PM |
1746 | return r; |
1747 | ||
cbbc58d4 AK |
1748 | kvm_ops_pr.owner = THIS_MODULE; |
1749 | kvmppc_pr_ops = &kvm_ops_pr; | |
f05ed4d5 | 1750 | |
cbbc58d4 | 1751 | r = kvmppc_mmu_hpte_sysinit(); |
f05ed4d5 PM |
1752 | return r; |
1753 | } | |
1754 | ||
cbbc58d4 | 1755 | void kvmppc_book3s_exit_pr(void) |
f05ed4d5 | 1756 | { |
cbbc58d4 | 1757 | kvmppc_pr_ops = NULL; |
f05ed4d5 | 1758 | kvmppc_mmu_hpte_sysexit(); |
f05ed4d5 PM |
1759 | } |
1760 | ||
cbbc58d4 AK |
1761 | /* |
1762 | * We only support separate modules for book3s 64 | |
1763 | */ | |
1764 | #ifdef CONFIG_PPC_BOOK3S_64 | |
1765 | ||
3a167bea AK |
1766 | module_init(kvmppc_book3s_init_pr); |
1767 | module_exit(kvmppc_book3s_exit_pr); | |
2ba9f0d8 AK |
1768 | |
1769 | MODULE_LICENSE("GPL"); | |
398a76c6 AG |
1770 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
1771 | MODULE_ALIAS("devname:kvm"); | |
cbbc58d4 | 1772 | #endif |