/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define __hard_irq_disable local_irq_disable
#define __hard_irq_enable local_irq_enable
#endif

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

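/*
 * The guest runs in problem state, so the MSR image the hardware uses
 * ("shadow MSR") can never be the guest's own MSR.  The recalculation
 * below keeps the guest-controlled bits (FE0/FE1, SF, trace bits),
 * forces on what the host needs (translation, problem state, machine
 * check and external enables), and passes through the facility bits
 * (FP/VEC/VSX) the guest currently owns.
 */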
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External provider bits (FP/VEC/VSX) the guest has reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are at hypervisor level on a 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set, so let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource.  Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage)) {
		kvm_release_page_clean(hpage);
		return;
	}

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

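	/*
	 * The 0xff0007ff mask below keeps the opcode fields of the
	 * instruction and clears the register operand fields, so any
	 * "dcbz rA,rB" compares equal to INS_DCBZ.  Clearing the 0x8
	 * bit turns it into an opcode the core doesn't implement, so
	 * executing it traps with a program interrupt, where we get to
	 * emulate the 32-byte dcbz.
	 */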
	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

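	/*
	 * Tag the virtual page number with the translation regime it was
	 * resolved under (real mode, or the IR-only/DR-only split real
	 * modes), so shadow mappings from different regimes never alias.
	 * With both IR and DR set, xlate() above already returned a
	 * vpage qualified by the guest's own VSID.
	 */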
	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

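/*
 * With CONFIG_VSX, the FP registers occupy the even-numbered doublewords
 * of the combined FP/VSX save area in the thread struct (the odd ones
 * hold the VSX upper halves), so FP register i lives at index 2 * i.
 */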
static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

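		/*
		 * The fetch of the faulting instruction itself failed, so
		 * reflect an instruction storage interrupt to the guest:
		 * the field updates below set SRR1 bit 33 ("translation
		 * not found", IBM numbering) and clear the remaining ISI
		 * status bits (34-36 and 42-47).
		 */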
		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

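/*
 * Central exit handler, called for every transition from the guest back
 * into the host kernel.  RESUME_GUEST re-enters the guest directly;
 * RESUME_HOST first bounces the exit out to userspace.
 */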
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	trace_kvm_book3s_exit(exit_nr, vcpu);
	preempt_enable();
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We mark segments as unused when invalidating them, so
		 * treat the respective fault as a segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 * so we can't use the NX bit inside the guest. Let's cross our fingers
			 * that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We mark segments as unused when invalidating them, so
		 * treat the respective fault as a segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
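			/*
			 * A program check raised in problem state is the
			 * guest's own business - unless it was caused by
			 * our patched dcbz (INS_DCBZ with the 0x8 bit
			 * cleared), which we must emulate below.
			 */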
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
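			/* (0x44000022 is the encoding of "sc 1") */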
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		__hard_irq_disable();
		if (signal_pending(current)) {
			__hard_irq_enable();
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			preempt_disable();

			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_prepare_to_enter(vcpu);
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &to_book3s(vcpu)->hior, sizeof(u64));
		if (r)
			r = -EFAULT;
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_from_user(&to_book3s(vcpu)->hior,
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		if (!r)
			to_book3s(vcpu)->hior_explicit = true;
		else
			r = -EFAULT;
		break;
	default:
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

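/*
 * Actually run the vCPU.  The host's FP/Altivec/VSX state is parked on
 * the stack across __kvmppc_vcpu_run() so the guest can use the real
 * registers, and is restored on the way out.
 */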
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	preempt_disable();

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	__hard_irq_disable();

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		__hard_irq_enable();
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvm_guest_enter();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvm_guest_exit();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	preempt_enable();
	return ret;
}

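/*
 * Note that dirty logging here is coarse: instead of tracking individual
 * writes, we flush every vCPU's shadow PTEs for the slot, presumably so
 * that subsequent writes fault in again and can be picked up afresh.
 */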
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();
	if (r)
		kvm_exit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);