/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();

	/* CPU points to the first thread of the core */
	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
		int real_cpu = cpu + vcpu->arch.ptid;
		if (paca[real_cpu].kvm_hstate.xics_phys)
			xics_wake_cpu(real_cpu);
		else if (cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return -EINVAL;	/* 970 has no compat mode support */

		switch (arch_compat) {
		case PVR_ARCH_205:
			pcr = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			break;
		default:
			return -EINVAL;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = 1;
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock_irq(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now + vc->tb_offset;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
	vcpu->arch.dtl.dirty = true;
}

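/*
 * Note (illustrative, not part of the original file): vpa->dtl_idx
 * above is a free-running count of entries ever written, while the
 * entry pointer itself wraps at pinned_end.  A guest-side consumer
 * can therefore detect overflow by checking whether dtl_idx has
 * advanced by more than the number of buffer slots since its last
 * read index, which is the convention Linux's own dispatch trace log
 * reader follows.
 */
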
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		kvm_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;

	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		} /* fallthrough */
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

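/*
 * Illustrative only (not part of the original file): how a request
 * reaches kvmppc_pseries_do_hcall() above.  Under PAPR the guest
 * executes "sc 1" with the hcall number in r3 and arguments in r4 and
 * up, and the status comes back in r3 -- exactly what the
 * kvmppc_get_gpr()/kvmppc_set_gpr() calls above implement.  A minimal
 * guest-side sketch for H_PROD (full clobber list elided for brevity):
 *
 *	register unsigned long r3 asm("r3") = H_PROD;
 *	register unsigned long r4 asm("r4") = target_vcpu_id;
 *	asm volatile("sc 1" : "+r" (r3) : "r" (r4) : "memory");
 *	// r3 now holds H_SUCCESS, or H_PARAMETER for a bad vcpu id
 */
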
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr_hv(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);
	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
}

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val));
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

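/*
 * Illustrative only (not part of the original file): the two handlers
 * above are reached from userspace through the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG ioctls.  A minimal sketch, assuming "vcpu_fd" is a
 * vcpu file descriptor obtained from KVM_CREATE_VCPU, that puts the
 * guest into architected v2.05 compatibility mode (ARCH_COMPAT is a
 * 32-bit register, so a __u32 buffer is passed by address):
 *
 *	__u32 compat = PVR_ARCH_205;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_ARCH_COMPAT,
 *		.addr = (__u64)(unsigned long)&compat,
 *	};
 *	if (ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg) < 0)
 *		perror("KVM_SET_ONE_REG");
 *
 * which lands in kvmppc_set_arch_compat() via the case above.
 */
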
static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
						   unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
			vcore->lpcr = kvm->arch.lpcr;
		}
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
	/* Indicate we want to get back into the guest */
	return 1;
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		/ tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

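/*
 * Worked example (illustrative, not part of the original file): on
 * POWER7 the timebase runs at 512 MHz, so tb_ticks_per_sec is
 * 512000000.  If dec_expires is 512000 ticks in the future, the
 * conversion above gives dec_nsec = 512000 * 1000000000 / 512000000
 * = 1000000 ns, i.e. the hrtimer fires 1 ms from now.
 */
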
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}

static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}

19ccb76a PM |
1423 | static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
1424 | { | |
1425 | int n_ceded; | |
19ccb76a PM |
1426 | struct kvmppc_vcore *vc; |
1427 | struct kvm_vcpu *v, *vn; | |
9e368f29 | 1428 | |
371fefd6 PM |
1429 | kvm_run->exit_reason = 0; |
1430 | vcpu->arch.ret = RESUME_GUEST; | |
1431 | vcpu->arch.trap = 0; | |
2f12f034 | 1432 | kvmppc_update_vpas(vcpu); |
371fefd6 | 1433 | |
371fefd6 PM |
1434 | /* |
1435 | * Synchronize with other threads in this virtual core | |
1436 | */ | |
1437 | vc = vcpu->arch.vcore; | |
1438 | spin_lock(&vc->lock); | |
19ccb76a | 1439 | vcpu->arch.ceded = 0; |
371fefd6 PM |
1440 | vcpu->arch.run_task = current; |
1441 | vcpu->arch.kvm_run = kvm_run; | |
c7b67670 | 1442 | vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); |
19ccb76a | 1443 | vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; |
c7b67670 | 1444 | vcpu->arch.busy_preempt = TB_NIL; |
371fefd6 PM |
1445 | list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads); |
1446 | ++vc->n_runnable; | |
1447 | ||
19ccb76a PM |
1448 | /* |
1449 | * This happens the first time this is called for a vcpu. | |
1450 | * If the vcore is already running, we may be able to start | |
1451 | * this thread straight away and have it join in. | |
1452 | */ | |
8455d79e | 1453 | if (!signal_pending(current)) { |
19ccb76a PM |
1454 | if (vc->vcore_state == VCORE_RUNNING && |
1455 | VCORE_EXIT_COUNT(vc) == 0) { | |
1456 | vcpu->arch.ptid = vc->n_runnable - 1; | |
2f12f034 | 1457 | kvmppc_create_dtl_entry(vcpu, vc); |
19ccb76a | 1458 | kvmppc_start_thread(vcpu); |
8455d79e PM |
1459 | } else if (vc->vcore_state == VCORE_SLEEPING) { |
1460 | wake_up(&vc->wq); | |
371fefd6 PM |
1461 | } |
1462 | ||
8455d79e | 1463 | } |
371fefd6 | 1464 | |
19ccb76a PM |
1465 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
1466 | !signal_pending(current)) { | |
8455d79e | 1467 | if (vc->vcore_state != VCORE_INACTIVE) { |
19ccb76a PM |
1468 | spin_unlock(&vc->lock); |
1469 | kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE); | |
1470 | spin_lock(&vc->lock); | |
1471 | continue; | |
1472 | } | |
19ccb76a PM |
1473 | list_for_each_entry_safe(v, vn, &vc->runnable_threads, |
1474 | arch.run_list) { | |
7e28e60e | 1475 | kvmppc_core_prepare_to_enter(v); |
19ccb76a PM |
1476 | if (signal_pending(v->arch.run_task)) { |
1477 | kvmppc_remove_runnable(vc, v); | |
1478 | v->stat.signal_exits++; | |
1479 | v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; | |
1480 | v->arch.ret = -EINTR; | |
1481 | wake_up(&v->arch.cpu_run); | |
1482 | } | |
1483 | } | |
8455d79e PM |
1484 | if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
1485 | break; | |
1486 | vc->runner = vcpu; | |
1487 | n_ceded = 0; | |
4619ac88 | 1488 | list_for_each_entry(v, &vc->runnable_threads, arch.run_list) { |
8455d79e PM |
1489 | if (!v->arch.pending_exceptions) |
1490 | n_ceded += v->arch.ceded; | |
4619ac88 PM |
1491 | else |
1492 | v->arch.ceded = 0; | |
1493 | } | |
8455d79e PM |
1494 | if (n_ceded == vc->n_runnable) |
1495 | kvmppc_vcore_blocked(vc); | |
1496 | else | |
1497 | kvmppc_run_core(vc); | |
0456ec4f | 1498 | vc->runner = NULL; |
19ccb76a | 1499 | } |
371fefd6 | 1500 | |
8455d79e PM |
1501 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
1502 | (vc->vcore_state == VCORE_RUNNING || | |
1503 | vc->vcore_state == VCORE_EXITING)) { | |
1504 | spin_unlock(&vc->lock); | |
1505 | kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE); | |
1506 | spin_lock(&vc->lock); | |
1507 | } | |
1508 | ||
1509 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { | |
1510 | kvmppc_remove_runnable(vc, vcpu); | |
1511 | vcpu->stat.signal_exits++; | |
1512 | kvm_run->exit_reason = KVM_EXIT_INTR; | |
1513 | vcpu->arch.ret = -EINTR; | |
1514 | } | |
1515 | ||
1516 | if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { | |
1517 | /* Wake up some vcpu to run the core */ | |
1518 | v = list_first_entry(&vc->runnable_threads, | |
1519 | struct kvm_vcpu, arch.run_list); | |
1520 | wake_up(&v->arch.cpu_run); | |
371fefd6 PM |
1521 | } |
1522 | ||
371fefd6 | 1523 | spin_unlock(&vc->lock); |
371fefd6 | 1524 | return vcpu->arch.ret; |
de56a948 PM |
1525 | } |
1526 | ||
3a167bea | 1527 | static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
a8606e20 PM |
1528 | { |
1529 | int r; | |
913d3ff9 | 1530 | int srcu_idx; |
a8606e20 | 1531 | |
af8f38b3 AG |
1532 | if (!vcpu->arch.sane) { |
1533 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1534 | return -EINVAL; | |
1535 | } | |
1536 | ||
25051b5a SW |
1537 | kvmppc_core_prepare_to_enter(vcpu); |
1538 | ||
19ccb76a PM |
1539 | /* No need to go into the guest when all we'll do is come back out */ |
1540 | if (signal_pending(current)) { | |
1541 | run->exit_reason = KVM_EXIT_INTR; | |
1542 | return -EINTR; | |
1543 | } | |
1544 | ||
32fad281 PM |
1545 | atomic_inc(&vcpu->kvm->arch.vcpus_running); |
1546 | /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */ | |
1547 | smp_mb(); | |
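/*
 * Annotation: the barrier above orders the vcpus_running increment
 * against the rma_setup_done test below; it presumably pairs with the
 * corresponding ordering in kvmppc_alloc_reset_hpt() so an HPT reset
 * cannot race with a vcpu entering the guest.
 */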
1548 | ||
1549 | /* On the first call for this VM, set up the HTAB and VRMA or RMA */
c77162de | 1550 | if (!vcpu->kvm->arch.rma_setup_done) { |
32fad281 | 1551 | r = kvmppc_hv_setup_htab_rma(vcpu); |
c77162de | 1552 | if (r) |
32fad281 | 1553 | goto out; |
c77162de | 1554 | } |
19ccb76a PM |
1555 | |
1556 | flush_fp_to_thread(current); | |
1557 | flush_altivec_to_thread(current); | |
1558 | flush_vsx_to_thread(current); | |
1559 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; | |
342d3db7 | 1560 | vcpu->arch.pgdir = current->mm->pgd; |
c7b67670 | 1561 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
19ccb76a | 1562 | |
a8606e20 PM |
1563 | do { |
1564 | r = kvmppc_run_vcpu(run, vcpu); | |
1565 | ||
1566 | if (run->exit_reason == KVM_EXIT_PAPR_HCALL && | |
1567 | !(vcpu->arch.shregs.msr & MSR_PR)) { | |
1568 | r = kvmppc_pseries_do_hcall(vcpu); | |
7e28e60e | 1569 | kvmppc_core_prepare_to_enter(vcpu); |
913d3ff9 PM |
1570 | } else if (r == RESUME_PAGE_FAULT) { |
1571 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | |
1572 | r = kvmppc_book3s_hv_page_fault(run, vcpu, | |
1573 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | |
1574 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | |
a8606e20 PM |
1575 | } |
1576 | } while (r == RESUME_GUEST); | |
32fad281 PM |
1577 | |
1578 | out: | |
c7b67670 | 1579 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
32fad281 | 1580 | atomic_dec(&vcpu->kvm->arch.vcpus_running); |
a8606e20 PM |
1581 | return r; |
1582 | } | |
1583 | ||
54738c09 | 1584 | |
aa04b4cc | 1585 | /* Work out the RMLS (real mode limit selector) field value for a given
9e368f29 | 1586 |  * RMA size.  Assumes POWER7 or PPC970. */
aa04b4cc PM |
1587 | static inline int lpcr_rmls(unsigned long rma_size) |
1588 | { | |
1589 | switch (rma_size) { | |
1590 | case 32ul << 20: /* 32 MB */ | |
9e368f29 PM |
1591 | if (cpu_has_feature(CPU_FTR_ARCH_206)) |
1592 | return 8; /* only supported on POWER7 */ | |
1593 | return -1; | |
aa04b4cc PM |
1594 | case 64ul << 20: /* 64 MB */ |
1595 | return 3; | |
1596 | case 128ul << 20: /* 128 MB */ | |
1597 | return 7; | |
1598 | case 256ul << 20: /* 256 MB */ | |
1599 | return 4; | |
1600 | case 1ul << 30: /* 1 GB */ | |
1601 | return 2; | |
1602 | case 16ul << 30: /* 16 GB */ | |
1603 | return 1; | |
1604 | case 256ul << 30: /* 256 GB */ | |
1605 | return 0; | |
1606 | default: | |
1607 | return -1; | |
1608 | } | |
1609 | } | |
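/*
 * Annotation (illustrative): lpcr_rmls(128ul << 20) returns 7; on
 * POWER7 that value is inserted at LPCR_RMLS_SH by
 * kvmppc_hv_setup_htab_rma() below.
 */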
1610 | ||
1611 | static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |
1612 | { | |
aa04b4cc | 1613 | struct page *page; |
6c45b810 | 1614 | struct kvm_rma_info *ri = vma->vm_file->private_data; |
aa04b4cc | 1615 | |
6c45b810 | 1616 | if (vmf->pgoff >= kvm_rma_pages) |
aa04b4cc PM |
1617 | return VM_FAULT_SIGBUS; |
1618 | ||
1619 | page = pfn_to_page(ri->base_pfn + vmf->pgoff); | |
1620 | get_page(page); | |
1621 | vmf->page = page; | |
1622 | return 0; | |
1623 | } | |
1624 | ||
1625 | static const struct vm_operations_struct kvm_rma_vm_ops = { | |
1626 | .fault = kvm_rma_fault, | |
1627 | }; | |
1628 | ||
1629 | static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma) | |
1630 | { | |
314e51b9 | 1631 | vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; |
aa04b4cc PM |
1632 | vma->vm_ops = &kvm_rma_vm_ops; |
1633 | return 0; | |
1634 | } | |
1635 | ||
1636 | static int kvm_rma_release(struct inode *inode, struct file *filp) | |
1637 | { | |
6c45b810 | 1638 | struct kvm_rma_info *ri = filp->private_data; |
aa04b4cc PM |
1639 | |
1640 | kvm_release_rma(ri); | |
1641 | return 0; | |
1642 | } | |
1643 | ||
75ef9de1 | 1644 | static const struct file_operations kvm_rma_fops = { |
aa04b4cc PM |
1645 | .mmap = kvm_rma_mmap, |
1646 | .release = kvm_rma_release, | |
1647 | }; | |
1648 | ||
3a167bea AK |
1649 | static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, |
1650 | struct kvm_allocate_rma *ret) | |
aa04b4cc | 1651 | { |
aa04b4cc | 1652 | long fd; |
6c45b810 AK |
1653 | struct kvm_rma_info *ri; |
1654 | /* | |
1655 | * Only do this on PPC970 in HV mode | |
1656 | */ | |
1657 | if (!cpu_has_feature(CPU_FTR_HVMODE) || | |
1658 | !cpu_has_feature(CPU_FTR_ARCH_201)) | |
1659 | return -EINVAL; | |
1660 | ||
1661 | if (!kvm_rma_pages) | |
1662 | return -EINVAL; | |
aa04b4cc PM |
1663 | |
1664 | ri = kvm_alloc_rma(); | |
1665 | if (!ri) | |
1666 | return -ENOMEM; | |
1667 | ||
2f84d5ea | 1668 | fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC); |
aa04b4cc PM |
1669 | if (fd < 0) |
1670 | kvm_release_rma(ri); | |
1671 | ||
6c45b810 | 1672 | ret->rma_size = kvm_rma_pages << PAGE_SHIFT; |
aa04b4cc PM |
1673 | return fd; |
1674 | } | |
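/*
 * Illustrative userspace sketch (not part of this file): allocating an
 * RMA and mapping it into the process, assuming an open VM file
 * descriptor vm_fd; error handling omitted.
 *
 *	struct kvm_allocate_rma rma;
 *	int rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);
 *	void *mem = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, rma_fd, 0);
 */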
1675 | ||
5b74716e BH |
1676 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, |
1677 | int linux_psize) | |
1678 | { | |
1679 | struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; | |
1680 | ||
1681 | if (!def->shift) | |
1682 | return; | |
1683 | (*sps)->page_shift = def->shift; | |
1684 | (*sps)->slb_enc = def->sllp; | |
1685 | (*sps)->enc[0].page_shift = def->shift; | |
b1022fbd AK |
1686 | /* |
1687 | * Only return the base page encoding. We don't want to return
1688 | * all the supported pte_enc values, because our H_ENTER doesn't
1689 | * support MPSS yet. Once it does, we can start passing all the
1690 | * supported pte_enc values here.
1691 | */ | |
1692 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; | |
5b74716e BH |
1693 | (*sps)++; |
1694 | } | |
1695 | ||
3a167bea AK |
1696 | static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, |
1697 | struct kvm_ppc_smmu_info *info) | |
5b74716e BH |
1698 | { |
1699 | struct kvm_ppc_one_seg_page_size *sps; | |
1700 | ||
1701 | info->flags = KVM_PPC_PAGE_SIZES_REAL; | |
1702 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | |
1703 | info->flags |= KVM_PPC_1T_SEGMENTS; | |
1704 | info->slb_size = mmu_slb_size; | |
1705 | ||
1706 | /* We only support these sizes for now, and no multi-size segments */
1707 | sps = &info->sps[0]; | |
1708 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K); | |
1709 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K); | |
1710 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M); | |
1711 | ||
1712 | return 0; | |
1713 | } | |
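/*
 * Illustrative userspace sketch (not part of this file): querying the
 * supported segment page sizes through the VM fd; the 16M entry, for
 * instance, reports page_shift == 24.
 *
 *	struct kvm_ppc_smmu_info info;
 *	ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info);
 */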
1714 | ||
82ed3616 PM |
1715 | /* |
1716 | * Get (and clear) the dirty memory log for a memory slot. | |
1717 | */ | |
3a167bea AK |
1718 | static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, |
1719 | struct kvm_dirty_log *log) | |
82ed3616 PM |
1720 | { |
1721 | struct kvm_memory_slot *memslot; | |
1722 | int r; | |
1723 | unsigned long n; | |
1724 | ||
1725 | mutex_lock(&kvm->slots_lock); | |
1726 | ||
1727 | r = -EINVAL; | |
bbacc0c1 | 1728 | if (log->slot >= KVM_USER_MEM_SLOTS) |
82ed3616 PM |
1729 | goto out; |
1730 | ||
1731 | memslot = id_to_memslot(kvm->memslots, log->slot); | |
1732 | r = -ENOENT; | |
1733 | if (!memslot->dirty_bitmap) | |
1734 | goto out; | |
1735 | ||
1736 | n = kvm_dirty_bitmap_bytes(memslot); | |
1737 | memset(memslot->dirty_bitmap, 0, n); | |
1738 | ||
dfe49dbd | 1739 | r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap); |
82ed3616 PM |
1740 | if (r) |
1741 | goto out; | |
1742 | ||
1743 | r = -EFAULT; | |
1744 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) | |
1745 | goto out; | |
1746 | ||
1747 | r = 0; | |
1748 | out: | |
1749 | mutex_unlock(&kvm->slots_lock); | |
1750 | return r; | |
1751 | } | |
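/*
 * Illustrative userspace sketch (not part of this file): fetching and
 * clearing the dirty bitmap for slot 0, assuming vm_fd and a user
 * buffer buf large enough for the memslot.
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *	log.dirty_bitmap = buf;
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */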
1752 | ||
a66b48c3 | 1753 | static void unpin_slot(struct kvm_memory_slot *memslot) |
de56a948 | 1754 | { |
a66b48c3 PM |
1755 | unsigned long *physp; |
1756 | unsigned long j, npages, pfn; | |
1757 | struct page *page; | |
aa04b4cc | 1758 | |
a66b48c3 PM |
1759 | physp = memslot->arch.slot_phys; |
1760 | npages = memslot->npages; | |
1761 | if (!physp) | |
1762 | return; | |
1763 | for (j = 0; j < npages; j++) { | |
1764 | if (!(physp[j] & KVMPPC_GOT_PAGE)) | |
1765 | continue; | |
1766 | pfn = physp[j] >> PAGE_SHIFT; | |
1767 | page = pfn_to_page(pfn); | |
1768 | SetPageDirty(page); | |
1769 | put_page(page); | |
1770 | } | |
1771 | } | |
1772 | ||
3a167bea AK |
1773 | static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, |
1774 | struct kvm_memory_slot *dont) | |
a66b48c3 PM |
1775 | { |
1776 | if (!dont || free->arch.rmap != dont->arch.rmap) { | |
1777 | vfree(free->arch.rmap); | |
1778 | free->arch.rmap = NULL; | |
b2b2f165 | 1779 | } |
a66b48c3 PM |
1780 | if (!dont || free->arch.slot_phys != dont->arch.slot_phys) { |
1781 | unpin_slot(free); | |
1782 | vfree(free->arch.slot_phys); | |
1783 | free->arch.slot_phys = NULL; | |
1784 | } | |
1785 | } | |
1786 | ||
3a167bea AK |
1787 | static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, |
1788 | unsigned long npages) | |
a66b48c3 PM |
1789 | { |
1790 | slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); | |
1791 | if (!slot->arch.rmap) | |
1792 | return -ENOMEM; | |
1793 | slot->arch.slot_phys = NULL; | |
aa04b4cc | 1794 | |
c77162de PM |
1795 | return 0; |
1796 | } | |
aa04b4cc | 1797 | |
3a167bea AK |
1798 | static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, |
1799 | struct kvm_memory_slot *memslot, | |
1800 | struct kvm_userspace_memory_region *mem) | |
c77162de | 1801 | { |
a66b48c3 | 1802 | unsigned long *phys; |
c77162de | 1803 | |
a66b48c3 PM |
1804 | /* Allocate a slot_phys array if needed */ |
1805 | phys = memslot->arch.slot_phys; | |
1806 | if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) { | |
1807 | phys = vzalloc(memslot->npages * sizeof(unsigned long)); | |
1808 | if (!phys) | |
1809 | return -ENOMEM; | |
1810 | memslot->arch.slot_phys = phys; | |
aa04b4cc | 1811 | } |
a66b48c3 PM |
1812 | |
1813 | return 0; | |
c77162de PM |
1814 | } |
1815 | ||
3a167bea AK |
1816 | static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, |
1817 | struct kvm_userspace_memory_region *mem, | |
1818 | const struct kvm_memory_slot *old) | |
c77162de | 1819 | { |
dfe49dbd PM |
1820 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
1821 | struct kvm_memory_slot *memslot; | |
1822 | ||
8482644a | 1823 | if (npages && old->npages) { |
dfe49dbd PM |
1824 | /* |
1825 | * If modifying a memslot, reset all the rmap dirty bits. | |
1826 | * If this is a new memslot, we don't need to do anything | |
1827 | * since the rmap array starts out as all zeroes, | |
1828 | * i.e. no pages are dirty. | |
1829 | */ | |
1830 | memslot = id_to_memslot(kvm->memslots, mem->slot); | |
1831 | kvmppc_hv_get_dirty_log(kvm, memslot, NULL); | |
1832 | } | |
c77162de PM |
1833 | } |
1834 | ||
a0144e2a PM |
1835 | /* |
1836 | * Update LPCR values in kvm->arch and in vcores. | |
1837 | * Caller must hold kvm->lock. | |
1838 | */ | |
1839 | void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) | |
1840 | { | |
1841 | long int i; | |
1842 | u32 cores_done = 0; | |
1843 | ||
1844 | if ((kvm->arch.lpcr & mask) == lpcr) | |
1845 | return; | |
1846 | ||
1847 | kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; | |
1848 | ||
1849 | for (i = 0; i < KVM_MAX_VCORES; ++i) { | |
1850 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; | |
1851 | if (!vc) | |
1852 | continue; | |
1853 | spin_lock(&vc->lock); | |
1854 | vc->lpcr = (vc->lpcr & ~mask) | lpcr; | |
1855 | spin_unlock(&vc->lock); | |
1856 | if (++cores_done >= kvm->arch.online_vcores) | |
1857 | break; | |
1858 | } | |
1859 | } | |
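/*
 * Annotation (illustrative call, not taken from this file): only the
 * bits in mask are replaced, so e.g.
 * kvmppc_update_lpcr(kvm, LPCR_VPM1, LPCR_VPM1) would set VPM1 in
 * kvm->arch.lpcr and in every vcore's lpcr while leaving all other
 * bits untouched.
 */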
1860 | ||
3a167bea AK |
1861 | static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) |
1862 | { | |
1863 | return; | |
1864 | } | |
1865 | ||
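/*
 * Annotation: first-run setup, serialized by kvm->lock. This allocates
 * the HPT if needed, looks up the memslot at guest physical address 0,
 * then either maps it as a VRMA (POWER7) or wires up a preallocated
 * RMO region (PPC970 or an explicitly allocated RMA), and finally
 * publishes the LPCR updates before setting rma_setup_done.
 */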
32fad281 | 1866 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) |
c77162de PM |
1867 | { |
1868 | int err = 0; | |
1869 | struct kvm *kvm = vcpu->kvm; | |
6c45b810 | 1870 | struct kvm_rma_info *ri = NULL; |
c77162de PM |
1871 | unsigned long hva; |
1872 | struct kvm_memory_slot *memslot; | |
1873 | struct vm_area_struct *vma; | |
a0144e2a PM |
1874 | unsigned long lpcr = 0, senc; |
1875 | unsigned long lpcr_mask = 0; | |
c77162de PM |
1876 | unsigned long psize, porder; |
1877 | unsigned long rma_size; | |
1878 | unsigned long rmls; | |
1879 | unsigned long *physp; | |
da9d1d7f | 1880 | unsigned long i, npages; |
2c9097e4 | 1881 | int srcu_idx; |
c77162de PM |
1882 | |
1883 | mutex_lock(&kvm->lock); | |
1884 | if (kvm->arch.rma_setup_done) | |
1885 | goto out; /* another vcpu beat us to it */ | |
aa04b4cc | 1886 | |
32fad281 PM |
1887 | /* Allocate hashed page table (if not done already) and reset it */ |
1888 | if (!kvm->arch.hpt_virt) { | |
1889 | err = kvmppc_alloc_hpt(kvm, NULL); | |
1890 | if (err) { | |
1891 | pr_err("KVM: Couldn't alloc HPT\n"); | |
1892 | goto out; | |
1893 | } | |
1894 | } | |
1895 | ||
c77162de | 1896 | /* Look up the memslot for guest physical address 0 */ |
2c9097e4 | 1897 | srcu_idx = srcu_read_lock(&kvm->srcu); |
c77162de | 1898 | memslot = gfn_to_memslot(kvm, 0); |
aa04b4cc | 1899 | |
c77162de PM |
1900 | /* We must have some memory at 0 by now */ |
1901 | err = -EINVAL; | |
1902 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) | |
2c9097e4 | 1903 | goto out_srcu; |
c77162de PM |
1904 | |
1905 | /* Look up the VMA for the start of this memory slot */ | |
1906 | hva = memslot->userspace_addr; | |
1907 | down_read(¤t->mm->mmap_sem); | |
1908 | vma = find_vma(current->mm, hva); | |
1909 | if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) | |
1910 | goto up_out; | |
1911 | ||
1912 | psize = vma_kernel_pagesize(vma); | |
da9d1d7f | 1913 | porder = __ilog2(psize); |
c77162de PM |
1914 | |
1915 | /* Is this one of our preallocated RMAs? */ | |
1916 | if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops && | |
1917 | hva == vma->vm_start) | |
1918 | ri = vma->vm_file->private_data; | |
1919 | ||
1920 | up_read(¤t->mm->mmap_sem); | |
1921 | ||
1922 | if (!ri) { | |
1923 | /* On POWER7, use VRMA; on PPC970, give up */ | |
1924 | err = -EPERM; | |
1925 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { | |
1926 | pr_err("KVM: CPU requires an RMO\n"); | |
2c9097e4 | 1927 | goto out_srcu; |
c77162de PM |
1928 | } |
1929 | ||
da9d1d7f PM |
1930 | /* We can handle 4k, 64k or 16M pages in the VRMA */ |
1931 | err = -EINVAL; | |
1932 | if (!(psize == 0x1000 || psize == 0x10000 || | |
1933 | psize == 0x1000000)) | |
2c9097e4 | 1934 | goto out_srcu; |
da9d1d7f | 1935 | |
c77162de | 1936 | /* Update VRMASD field in the LPCR */ |
da9d1d7f | 1937 | senc = slb_pgsize_encoding(psize); |
697d3899 PM |
1938 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
1939 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
a0144e2a PM |
1940 | lpcr_mask = LPCR_VRMASD; |
1941 | /* the -4 is to account for senc values starting at 0x10 */ | |
1942 | lpcr = senc << (LPCR_VRMASD_SH - 4); | |
c77162de PM |
1943 | |
1944 | /* Create HPTEs in the hash page table for the VRMA */ | |
da9d1d7f | 1945 | kvmppc_map_vrma(vcpu, memslot, porder); |
c77162de PM |
1946 | |
1947 | } else { | |
1948 | /* Set up to use an RMO region */ | |
6c45b810 | 1949 | rma_size = kvm_rma_pages; |
c77162de PM |
1950 | if (rma_size > memslot->npages) |
1951 | rma_size = memslot->npages; | |
1952 | rma_size <<= PAGE_SHIFT; | |
aa04b4cc | 1953 | rmls = lpcr_rmls(rma_size); |
c77162de | 1954 | err = -EINVAL; |
5d226ae5 | 1955 | if ((long)rmls < 0) { |
c77162de | 1956 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); |
2c9097e4 | 1957 | goto out_srcu; |
aa04b4cc PM |
1958 | } |
1959 | atomic_inc(&ri->use_count); | |
1960 | kvm->arch.rma = ri; | |
9e368f29 PM |
1961 | |
1962 | /* Update LPCR and RMOR */ | |
9e368f29 PM |
1963 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
1964 | /* PPC970; insert RMLS value (split field) in HID4 */ | |
a0144e2a PM |
1965 | lpcr_mask = (1ul << HID4_RMLS0_SH) | |
1966 | (3ul << HID4_RMLS2_SH) | HID4_RMOR; | |
1967 | lpcr = ((rmls >> 2) << HID4_RMLS0_SH) | | |
9e368f29 PM |
1968 | ((rmls & 3) << HID4_RMLS2_SH); |
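/* Annotation (illustrative value): rmls == 7 (a 128 MB RMA) puts 1 in
 * the RMLS0 field and 3 in the RMLS2 field. */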
1969 | /* RMOR is also in HID4 */ | |
1970 | lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff) | |
1971 | << HID4_RMOR_SH; | |
1972 | } else { | |
1973 | /* POWER7 */ | |
a0144e2a PM |
1974 | lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS; |
1975 | lpcr = rmls << LPCR_RMLS_SH; | |
6c45b810 | 1976 | kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT; |
9e368f29 | 1977 | } |
c77162de | 1978 | pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", |
aa04b4cc | 1979 | ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); |
aa04b4cc | 1980 | |
c77162de | 1981 | /* Initialize phys addrs of pages in RMO */ |
6c45b810 | 1982 | npages = kvm_rma_pages; |
da9d1d7f | 1983 | porder = __ilog2(npages); |
a66b48c3 PM |
1984 | physp = memslot->arch.slot_phys; |
1985 | if (physp) { | |
1986 | if (npages > memslot->npages) | |
1987 | npages = memslot->npages; | |
1988 | spin_lock(&kvm->arch.slot_phys_lock); | |
1989 | for (i = 0; i < npages; ++i) | |
1990 | physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + | |
1991 | porder; | |
1992 | spin_unlock(&kvm->arch.slot_phys_lock); | |
1993 | } | |
aa04b4cc PM |
1994 | } |
1995 | ||
a0144e2a PM |
1996 | kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); |
1997 | ||
c77162de PM |
1998 | /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */ |
1999 | smp_wmb(); | |
2000 | kvm->arch.rma_setup_done = 1; | |
2001 | err = 0; | |
2c9097e4 PM |
2002 | out_srcu: |
2003 | srcu_read_unlock(&kvm->srcu, srcu_idx); | |
c77162de PM |
2004 | out: |
2005 | mutex_unlock(&kvm->lock); | |
2006 | return err; | |
b2b2f165 | 2007 | |
c77162de PM |
2008 | up_out: |
2009 | up_read(¤t->mm->mmap_sem); | |
505d6421 | 2010 | goto out_srcu; |
de56a948 PM |
2011 | } |
2012 | ||
3a167bea | 2013 | static int kvmppc_core_init_vm_hv(struct kvm *kvm) |
de56a948 | 2014 | { |
32fad281 | 2015 | unsigned long lpcr, lpid; |
de56a948 | 2016 | |
32fad281 PM |
2017 | /* Allocate the guest's logical partition ID */ |
2018 | ||
2019 | lpid = kvmppc_alloc_lpid(); | |
5d226ae5 | 2020 | if ((long)lpid < 0) |
32fad281 PM |
2021 | return -ENOMEM; |
2022 | kvm->arch.lpid = lpid; | |
de56a948 | 2023 | |
1b400ba0 PM |
2024 | /* |
2025 | * Since we don't flush the TLB when tearing down a VM, | |
2026 | * and this lpid might have previously been used, | |
2027 | * make sure we flush on each core before running the new VM. | |
2028 | */ | |
2029 | cpumask_setall(&kvm->arch.need_tlb_flush); | |
2030 | ||
aa04b4cc | 2031 | kvm->arch.rma = NULL; |
aa04b4cc | 2032 | |
9e368f29 | 2033 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); |
aa04b4cc | 2034 | |
9e368f29 PM |
2035 | if (cpu_has_feature(CPU_FTR_ARCH_201)) { |
2036 | /* PPC970; HID4 is effectively the LPCR */ | |
9e368f29 PM |
2037 | kvm->arch.host_lpid = 0; |
2038 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4); | |
2039 | lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH)); | |
2040 | lpcr |= ((lpid >> 4) << HID4_LPID1_SH) | | |
2041 | ((lpid & 0xf) << HID4_LPID5_SH); | |
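/* Annotation (illustrative value): lpid 0x13 puts 0x1 in the LPID1
 * field and 0x3 in the LPID5 field. */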
2042 | } else { | |
2043 | /* POWER7; init LPCR for virtual RMA mode */ | |
2044 | kvm->arch.host_lpid = mfspr(SPRN_LPID); | |
2045 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); | |
2046 | lpcr &= LPCR_PECE | LPCR_LPES; | |
2047 | lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | | |
697d3899 PM |
2048 | LPCR_VPM0 | LPCR_VPM1; |
2049 | kvm->arch.vrma_slb_v = SLB_VSID_B_1T | | |
2050 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
9e368f29 PM |
2051 | } |
2052 | kvm->arch.lpcr = lpcr; | |
aa04b4cc | 2053 | |
342d3db7 | 2054 | kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206); |
c77162de | 2055 | spin_lock_init(&kvm->arch.slot_phys_lock); |
512691d4 PM |
2056 | |
2057 | /* | |
2058 | * Don't allow secondary CPU threads to come online | |
2059 | * while any KVM VMs exist. | |
2060 | */ | |
2061 | inhibit_secondary_onlining(); | |
2062 | ||
54738c09 | 2063 | return 0; |
de56a948 PM |
2064 | } |
2065 | ||
f1378b1c PM |
2066 | static void kvmppc_free_vcores(struct kvm *kvm) |
2067 | { | |
2068 | long int i; | |
2069 | ||
2070 | for (i = 0; i < KVM_MAX_VCORES; ++i) | |
2071 | kfree(kvm->arch.vcores[i]); | |
2072 | kvm->arch.online_vcores = 0; | |
2073 | } | |
2074 | ||
3a167bea | 2075 | static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) |
de56a948 | 2076 | { |
512691d4 PM |
2077 | uninhibit_secondary_onlining(); |
2078 | ||
f1378b1c | 2079 | kvmppc_free_vcores(kvm); |
aa04b4cc PM |
2080 | if (kvm->arch.rma) { |
2081 | kvm_release_rma(kvm->arch.rma); | |
2082 | kvm->arch.rma = NULL; | |
2083 | } | |
2084 | ||
de56a948 PM |
2085 | kvmppc_free_hpt(kvm); |
2086 | } | |
2087 | ||
3a167bea AK |
2088 | /* We don't need to emulate any privileged instructions or dcbz */ |
2089 | static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
2090 | unsigned int inst, int *advance) | |
de56a948 | 2091 | { |
3a167bea | 2092 | return EMULATE_FAIL; |
de56a948 PM |
2093 | } |
2094 | ||
3a167bea AK |
2095 | static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, |
2096 | ulong spr_val) | |
de56a948 PM |
2097 | { |
2098 | return EMULATE_FAIL; | |
2099 | } | |
2100 | ||
3a167bea AK |
2101 | static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, |
2102 | ulong *spr_val) | |
de56a948 PM |
2103 | { |
2104 | return EMULATE_FAIL; | |
2105 | } | |
2106 | ||
3a167bea | 2107 | static int kvmppc_core_check_processor_compat_hv(void) |
de56a948 | 2108 | { |
3a167bea AK |
2109 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
2110 | return -EIO; | |
2111 | return 0; | |
de56a948 PM |
2112 | } |
2113 | ||
3a167bea AK |
2114 | static long kvm_arch_vm_ioctl_hv(struct file *filp, |
2115 | unsigned int ioctl, unsigned long arg) | |
2116 | { | |
2117 | struct kvm *kvm __maybe_unused = filp->private_data; | |
2118 | void __user *argp = (void __user *)arg; | |
2119 | long r; | |
2120 | ||
2121 | switch (ioctl) { | |
2122 | ||
2123 | case KVM_ALLOCATE_RMA: { | |
2124 | struct kvm_allocate_rma rma; | |
2125 | struct kvm *kvm = filp->private_data; | |
2126 | ||
2127 | r = kvm_vm_ioctl_allocate_rma(kvm, &rma); | |
2128 | if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma))) | |
2129 | r = -EFAULT; | |
2130 | break; | |
2131 | } | |
2132 | ||
2133 | case KVM_PPC_ALLOCATE_HTAB: { | |
2134 | u32 htab_order; | |
2135 | ||
2136 | r = -EFAULT; | |
2137 | if (get_user(htab_order, (u32 __user *)argp)) | |
2138 | break; | |
2139 | r = kvmppc_alloc_reset_hpt(kvm, &htab_order); | |
2140 | if (r) | |
2141 | break; | |
2142 | r = -EFAULT; | |
2143 | if (put_user(htab_order, (u32 __user *)argp)) | |
2144 | break; | |
2145 | r = 0; | |
2146 | break; | |
2147 | } | |
2148 | ||
2149 | case KVM_PPC_GET_HTAB_FD: { | |
2150 | struct kvm_get_htab_fd ghf; | |
2151 | ||
2152 | r = -EFAULT; | |
2153 | if (copy_from_user(&ghf, argp, sizeof(ghf))) | |
2154 | break; | |
2155 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); | |
2156 | break; | |
2157 | } | |
2158 | ||
2159 | default: | |
2160 | r = -ENOTTY; | |
2161 | } | |
2162 | ||
2163 | return r; | |
2164 | } | |
2165 | ||
cbbc58d4 | 2166 | static struct kvmppc_ops kvm_ops_hv = { |
3a167bea AK |
2167 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, |
2168 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, | |
2169 | .get_one_reg = kvmppc_get_one_reg_hv, | |
2170 | .set_one_reg = kvmppc_set_one_reg_hv, | |
2171 | .vcpu_load = kvmppc_core_vcpu_load_hv, | |
2172 | .vcpu_put = kvmppc_core_vcpu_put_hv, | |
2173 | .set_msr = kvmppc_set_msr_hv, | |
2174 | .vcpu_run = kvmppc_vcpu_run_hv, | |
2175 | .vcpu_create = kvmppc_core_vcpu_create_hv, | |
2176 | .vcpu_free = kvmppc_core_vcpu_free_hv, | |
2177 | .check_requests = kvmppc_core_check_requests_hv, | |
2178 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, | |
2179 | .flush_memslot = kvmppc_core_flush_memslot_hv, | |
2180 | .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, | |
2181 | .commit_memory_region = kvmppc_core_commit_memory_region_hv, | |
2182 | .unmap_hva = kvm_unmap_hva_hv, | |
2183 | .unmap_hva_range = kvm_unmap_hva_range_hv, | |
2184 | .age_hva = kvm_age_hva_hv, | |
2185 | .test_age_hva = kvm_test_age_hva_hv, | |
2186 | .set_spte_hva = kvm_set_spte_hva_hv, | |
2187 | .mmu_destroy = kvmppc_mmu_destroy_hv, | |
2188 | .free_memslot = kvmppc_core_free_memslot_hv, | |
2189 | .create_memslot = kvmppc_core_create_memslot_hv, | |
2190 | .init_vm = kvmppc_core_init_vm_hv, | |
2191 | .destroy_vm = kvmppc_core_destroy_vm_hv, | |
3a167bea AK |
2192 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, |
2193 | .emulate_op = kvmppc_core_emulate_op_hv, | |
2194 | .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, | |
2195 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, | |
2196 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, | |
2197 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, | |
2198 | }; | |
2199 | ||
2200 | static int kvmppc_book3s_init_hv(void) | |
de56a948 PM |
2201 | { |
2202 | int r; | |
cbbc58d4 AK |
2203 | /* |
2204 | * FIXME!! Do we need to check on all cpus?
2205 | */ | |
2206 | r = kvmppc_core_check_processor_compat_hv(); | |
2207 | if (r < 0) | |
de56a948 PM |
2208 | return r; |
2209 | ||
cbbc58d4 AK |
2210 | kvm_ops_hv.owner = THIS_MODULE; |
2211 | kvmppc_hv_ops = &kvm_ops_hv; | |
de56a948 | 2212 | |
cbbc58d4 | 2213 | r = kvmppc_mmu_hv_init(); |
de56a948 PM |
2214 | return r; |
2215 | } | |
2216 | ||
3a167bea | 2217 | static void kvmppc_book3s_exit_hv(void) |
de56a948 | 2218 | { |
cbbc58d4 | 2219 | kvmppc_hv_ops = NULL; |
de56a948 PM |
2220 | } |
2221 | ||
3a167bea AK |
2222 | module_init(kvmppc_book3s_init_hv); |
2223 | module_exit(kvmppc_book3s_exit_hv); | |
2ba9f0d8 | 2224 | MODULE_LICENSE("GPL"); |