/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/*
	 * We misuse TLB_FLUSH to indicate that we want to clear
	 * all shadow cache entries.
	 */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the to-be-unmapped page.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate the 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into a reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to
			 * flush & patch the page, so we can't use the NX bit
			 * inside the guest. Let's cross our fingers that no
			 * guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
			   (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
			   (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the
		 * guest and if we really did time things so badly, then we
		 * just exit again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &to_book3s(vcpu)->hior, sizeof(u64));
		break;
	default:
		break;
	}

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_HIOR:
		r = copy_from_user(&to_book3s(vcpu)->hior,
				   (u64 __user *)(long)reg->addr, sizeof(u64));
		if (!r)
			to_book3s(vcpu)->hior_explicit = true;
		break;
	default:
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_lazy_ee_enable();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	/* No flags */
	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif

	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);