/* arch/powerpc/kvm/book3s_hv.c */
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

#if defined(CONFIG_PPC_64K_PAGES)
#define MPP_BUFFER_ORDER	0
#elif defined(CONFIG_PPC_4K_PAGES)
#define MPP_BUFFER_ORDER	3
#endif


static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
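
/*
 * Kick a vcpu to get its attention: wake any task sleeping on the
 * vcpu's wait queue, and if the vcpu is loaded on another physical
 * CPU, prod that CPU with an XICS IPI if the target thread is
 * napping in real mode, or an ordinary reschedule IPI otherwise.
 */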
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();

	/* CPU points to the first thread of the core */
	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
#ifdef CONFIG_PPC_ICP_NATIVE
		int real_cpu = cpu + vcpu->arch.ptid;
		if (paca[real_cpu].kvm_hstate.xics_phys)
			xics_wake_cpu(real_cpu);
		else
#endif
		if (cpu_online(cpu))
			smp_send_reschedule(cpu);
	}
	put_cpu();
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

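/*
 * Set the guest's architecture compatibility level.  This is expressed
 * as a PVR "architected mode" value (e.g. PVR_ARCH_206 for POWER7) and
 * implemented by setting bits in the Processor Compatibility Register
 * (PCR) for the whole virtual core.  PCR bits cascade: limiting the
 * guest to an older architecture level requires setting the bits for
 * all newer levels as well, so compat mode 2.05 on a POWER8 host sets
 * both PCR_ARCH_206 and PCR_ARCH_205.
 */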
int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			return -EINVAL;	/* 970 has no compat mode support */

		switch (arch_compat) {
		case PVR_ARCH_205:
			/*
			 * If an arch bit is set in PCR, all the defined
			 * higher-order arch bits also have to be set.
			 */
			pcr = PCR_ARCH_206 | PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			pcr = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			break;
		default:
			return -EINVAL;
		}

		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
			/* POWER7 can't emulate POWER8 */
			if (!(pcr & PCR_ARCH_206))
				return -EINVAL;
			pcr &= ~PCR_ARCH_206;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

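/*
 * Handle the H_REGISTER_VPA hcall: register or deregister the virtual
 * processor area (VPA), SLB shadow buffer, or dispatch trace log (DTL)
 * for a target vcpu.  The subfunction is encoded in the flags argument.
 * This only validates the buffer and records its address and length;
 * the buffer is actually pinned and mapped later, in
 * kvmppc_update_vpas(), because pinning may sleep.
 */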
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock_irq(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}

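/*
 * Write an entry into the guest's dispatch trace log (DTL), if one is
 * registered.  The DTL is a ring buffer of struct dtl_entry; when the
 * write pointer reaches pinned_end it wraps back to pinned_addr, and
 * vpa->dtl_idx counts entries monotonically so the guest can detect
 * overruns.  The "stolen" time reported is the vcore stolen time plus
 * any time this vcpu spent busy in the host since the last entry.
 */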
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

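/*
 * Handle H_SET_MODE.  Only the POWER8 debug facilities are handled
 * here: CIABR (instruction breakpoint) and DAWR/DAWRX (data
 * watchpoint).  Values that would place a breakpoint or watchpoint
 * on hypervisor state are rejected, and any other resource is
 * returned as H_TOO_HARD so the hcall gets punted to userspace.
 */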
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	default:
		return H_TOO_HARD;
	}
}

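/*
 * Dispatch a hypercall made by the guest.  Hypercall numbers are
 * multiples of 4, so the enabled-hcalls bitmap is indexed by req/4;
 * an hcall whose bit is clear is punted straight to userspace.
 * On RESUME_GUEST the hcall status is returned to the guest in GPR3;
 * RESUME_HOST hands the hcall to userspace, and a negative value is
 * an error reported via KVM_RUN.
 */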
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		kvm_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		} /* fallthrough */
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/*
		 * A hypercall with MSR_PR set has already been handled
		 * in real mode, and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest (kernel or userspace) does something
	 * that is prohibited by HFSCR.  We just generate a program
	 * interrupt to the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr_hv(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

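/*
 * Update the LPCR for a whole virtual core.  Userspace may only change
 * a whitelisted set of bits (DPFD, ILE, TC, and AIL on POWER8); when
 * the value arrives via the old 32-bit KVM_REG_PPC_LPCR interface,
 * preserve_top32 keeps the upper 32 bits of the register intact.
 */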
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm *kvm = vcpu->kvm;
		struct kvm_vcpu *vcpu;
		int i;

		mutex_lock(&kvm->lock);
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
		mutex_unlock(&kvm->lock);
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
}

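/*
 * Read one vcpu register for the KVM_GET_ONE_REG ioctl.  The 64-bit
 * register ID encodes which register is wanted; contiguous ranges
 * (e.g. the PMU counters and the transactional-memory checkpoint)
 * are handled by indexing off the first ID in the range.  For
 * example, userspace would fetch the DSCR with something like
 * (illustrative only):
 *
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_DSCR,
 *		.addr = (__u64)(unsigned long)&value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * Unknown IDs return -EINVAL.
 */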
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		vcpu->arch.spmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIER:
		vcpu->arch.sier = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAMR:
		vcpu->arch.iamr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSPB:
		vcpu->arch.pspb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DPDES:
		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWR:
		vcpu->arch.dawr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWRX:
		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
		break;
	case KVM_REG_PPC_CIABR:
		vcpu->arch.ciabr = set_reg_val(id, *val);
		/* Don't allow setting breakpoints in hypervisor code */
		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
		break;
	case KVM_REG_PPC_CSIGR:
		vcpu->arch.csigr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TACR:
		vcpu->arch.tacr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TCSCR:
		vcpu->arch.tcscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PID:
		vcpu->arch.pid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ACOP:
		vcpu->arch.acop = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_WORT:
		vcpu->arch.wort = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
		break;
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		/* use the checkpointed (_tm) VSCR, matching the get side */
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

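/*
 * A virtual core (vcore) is the scheduling unit for HV KVM: all the
 * vcpus sharing one physical core enter and exit the guest together.
 * The vcore owns the per-core guest state (LPCR, TB offset, PCR,
 * DPDES) and, on POWER8, an optional micro-partition prefetch (MPP)
 * buffer used to log the contents of the L2 cache so they can be
 * prefetched back when the vcore next runs.
 */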
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
	struct kvmppc_vcore *vcore;

	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);

	if (vcore == NULL)
		return NULL;

	INIT_LIST_HEAD(&vcore->runnable_threads);
	spin_lock_init(&vcore->lock);
	init_waitqueue_head(&vcore->wq);
	vcore->preempt_tb = TB_NIL;
	vcore->lpcr = kvm->arch.lpcr;
	vcore->first_vcpuid = core * threads_per_subcore;
	vcore->kvm = kvm;

	vcore->mpp_buffer_is_valid = false;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcore->mpp_buffer = (void *)__get_free_pages(
			GFP_KERNEL|__GFP_ZERO,
			MPP_BUFFER_ORDER);

	return vcore;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
						   unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_subcore;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/*
	 * The shared struct is never shared on HV,
	 * so we can always use host endianness
	 */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif
#endif
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;
	vcpu->arch.intr_msr = MSR_SF | MSR_ME;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kvmppc_vcore_create(kvm, core);
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
	/* Indicate we want to get back into the guest */
	return 1;
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern void __kvmppc_vcore_entry(void);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}

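/*
 * Claim an offline secondary hardware thread for guest use, so that
 * it stays in nap mode and won't enter the host kernel while the
 * guest owns the core.  Paired with kvmppc_release_hwthread() below.
 */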
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu != smp_processor_id()) {
		xics_wake_cpu(cpu);
		if (vcpu->arch.ptid)
			++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr;

	/* Are we on a primary subcore? */
	if (cpu_thread_in_subcore(cpu))
		return 0;

	thr = 0;
	while (++thr < threads_per_subcore)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_subcore; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

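/*
 * POWER8 micro-partition prefetch (MPP) support.  On guest exit we
 * ask the MPP engine to log the contents of the L2 cache into the
 * vcore's buffer (logmpp); when the vcore next runs we program
 * SPRN_MPPR to prefetch from that buffer, warming the cache back up
 * after the partition switch.
 */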
static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
{
	phys_addr_t phy_addr, mpp_addr;

	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;

	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);

	vc->mpp_buffer_is_valid = true;
}

static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
{
	phys_addr_t phy_addr, mpp_addr;

	phy_addr = virt_to_phys(vc->mpp_buffer);
	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;

	/*
	 * We must abort any in-progress save operations to ensure
	 * the table is valid so that the prefetch engine knows when
	 * to stop prefetching.
	 */
	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vnext;
	long ret;
	u64 now;
	int i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Make sure we are running on primary threads, and that secondary
	 * threads are offline.  Also check that the number of threads in
	 * this guest doesn't exceed the number of threads per subcore on
	 * this system.
	 */
	if ((threads_per_core > 1) &&
	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}


	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	/* Set this explicitly in case thread 0 doesn't have a vcpu */
	get_paca()->kvm_hstate.kvm_vcore = vc;
	get_paca()->kvm_hstate.ptid = 0;

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vc->kvm->srcu);

	if (vc->mpp_buffer_is_valid)
		kvmppc_start_restoring_l2_cache(vc);

	__kvmppc_vcore_entry();

	spin_lock(&vc->lock);

	if (vc->mpp_buffer)
		kvmppc_start_saving_l2_cache(vc);

	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_subcore; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	cond_resched();

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (!is_kvmppc_resume_guest(ret))
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}

19ccb76a
PM
1774/*
1775 * Wait for some other vcpu thread to execute us, and
1776 * wake us up when we need to handle something in the host.
1777 */
1778static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
371fefd6 1779{
371fefd6
PM
1780 DEFINE_WAIT(wait);
1781
19ccb76a
PM
1782 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
1783 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
1784 schedule();
1785 finish_wait(&vcpu->arch.cpu_run, &wait);
1786}
1787
1788/*
1789 * All the vcpus in this vcore are idle, so wait for a decrementer
1790 * or external interrupt to one of the vcpus. vc->lock is held.
1791 */
1792static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
1793{
1794 DEFINE_WAIT(wait);
19ccb76a
PM
1795
1796 prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
1797 vc->vcore_state = VCORE_SLEEPING;
1798 spin_unlock(&vc->lock);
913d3ff9 1799 schedule();
19ccb76a
PM
1800 finish_wait(&vc->wq, &wait);
1801 spin_lock(&vc->lock);
1802 vc->vcore_state = VCORE_INACTIVE;
1803}
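
/*
 * The sleep above ends when something kicks the vcore's wait queue:
 * kvmppc_vcpu_run_hv() points each vcpu's arch.wqp at &vc->wq, so a
 * wakeup delivered through kvmppc_fast_vcpu_kick_hv() rouses the
 * whole vcore, as does a new vcpu task arriving in kvmppc_run_vcpu()
 * while the vcore is in VCORE_SLEEPING state.
 */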

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
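
/*
 * Each vcpu has its own userspace task, but only one task per vcore
 * (vc->runner) drives kvmppc_run_core() at a time; the other tasks
 * sleep in kvmppc_wait_for_exec() while the runner enters and exits
 * the guest on behalf of all the vcpus in the core.
 */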

static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (is_kvmppc_resume_guest(r));

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}
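
/*
 * This is the ->vcpu_run back end for the KVM_RUN ioctl on a vcpu fd.
 * A minimal sketch of a userspace caller, with vcpu_fd a vcpu file
 * descriptor and run_size obtained from KVM_GET_VCPU_MMAP_SIZE
 * (illustrative only; error handling omitted):
 *
 *	struct kvm_run *run = mmap(NULL, run_size,
 *				   PROT_READ | PROT_WRITE, MAP_SHARED,
 *				   vcpu_fd, 0);
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_INTR)
 *			break;
 *		... dispatch on the other exit_reason values ...
 *	}
 */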

/*
 * Work out RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
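
/*
 * Example: a 128 MB RMA yields lpcr_rmls(128ul << 20) == 7, which the
 * caller programs into the RMLS field of LPCR on POWER7 or the split
 * RMLS field of HID4 on PPC970.  Any size not in the table (512 MB,
 * say) returns -1 and makes the caller reject the RMA.
 */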
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;
	struct kvm_rma_info *ri = vma->vm_file->private_data;

	if (vmf->pgoff >= kvm_rma_pages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvm_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static const struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};
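
/*
 * Userspace maps a preallocated RMA by mmap()ing the fd returned from
 * KVM_ALLOCATE_RMA below.  Each fault on that mapping lands in
 * kvm_rma_fault(), which hands back the page at the faulting offset,
 * and the final close() drops the region via kvm_rma_release().
 */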

static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
				      struct kvm_allocate_rma *ret)
{
	long fd;
	struct kvm_rma_info *ri;
	/*
	 * Only do this on PPC970 in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_201))
		return -EINVAL;

	if (!kvm_rma_pages)
		return -EINVAL;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
	if (fd < 0)
		kvm_release_rma(ri);

	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
	return fd;
}
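
/*
 * A minimal sketch of the userspace side, with vm_fd standing in for
 * an open VM file descriptor (hypothetical; error handling omitted):
 *
 *	struct kvm_allocate_rma rma;
 *	int rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);
 *	void *rmo = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, rma_fd, 0);
 *
 * The mapping would then be registered as the memslot at guest
 * physical address 0 so kvmppc_hv_setup_htab_rma() can recognize it.
 */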

static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
	/*
	 * Add 16MB MPSS support if host supports it
	 */
	if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
		(*sps)->enc[1].page_shift = 24;
		(*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
	}
	(*sps)++;
}
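
/*
 * For example, with a 64K base page size whose segments also permit
 * 16MB pages (MPSS), the entry built here advertises page_shift = 16
 * with enc[0] holding the 64K PTE encoding and enc[1] the 16MB one
 * (page_shift = 24), letting the guest back a 64K segment with 16MB
 * pages.
 */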

static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
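
/*
 * This backs the standard KVM_GET_DIRTY_LOG ioctl on the VM fd.  A
 * sketch of a caller, with vm_fd and bitmap as placeholders (bitmap
 * needs one bit per page in the slot):
 *
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Because the bitmap is zeroed before kvmppc_hv_get_dirty_log()
 * refills it, each call reports only pages dirtied since the last one.
 */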

static void unpin_slot(struct kvm_memory_slot *memslot)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = memslot->arch.slot_phys;
	npages = memslot->npages;
	if (!physp)
		return;
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
			continue;
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
		SetPageDirty(page);
		put_page(page);
	}
}

static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
		unpin_slot(free);
		vfree(free->arch.slot_phys);
		free->arch.slot_phys = NULL;
	}
}

static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;
	slot->arch.slot_phys = NULL;

	return 0;
}

static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	unsigned long *phys;

	/* Allocate a slot_phys array if needed */
	phys = memslot->arch.slot_phys;
	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
		phys = vzalloc(memslot->npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		memslot->arch.slot_phys = phys;
	}

	return 0;
}

static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old->npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}

/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->lock.
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
	long int i;
	u32 cores_done = 0;

	if ((kvm->arch.lpcr & mask) == lpcr)
		return;

	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
		if (!vc)
			continue;
		spin_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		spin_unlock(&vc->lock);
		if (++cores_done >= kvm->arch.online_vcores)
			break;
	}
}
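
/*
 * Only the bits covered by mask change; the rest of LPCR is kept.
 * For example, kvmppc_hv_setup_htab_rma() below updates just the
 * VRMA page-size encoding with:
 *
 *	kvmppc_update_lpcr(kvm, senc << (LPCR_VRMASD_SH - 4), LPCR_VRMASD);
 */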

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
	return;
}

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_rma_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long lpcr_mask = 0;
	unsigned long psize, porder;
	unsigned long rma_size;
	unsigned long rmls;
	unsigned long *physp;
	unsigned long i, npages;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out_srcu;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out_srcu;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr_mask = LPCR_VRMASD;
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = kvm_rma_pages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if ((long)rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out_srcu;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr_mask = (1ul << HID4_RMLS0_SH) |
				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
			lpcr = rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
		}
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = kvm_rma_pages;
		porder = __ilog2(npages);
		physp = memslot->arch.slot_phys;
		if (physp) {
			if (npages > memslot->npages)
				npages = memslot->npages;
			spin_lock(&kvm->arch.slot_phys_lock);
			for (i = 0; i < npages; ++i)
				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
					porder;
			spin_unlock(&kvm->arch.slot_phys_lock);
		}
	}

	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}
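
/*
 * The smp_wmb() before setting rma_setup_done above pairs with the
 * smp_mb() after atomic_inc(&kvm->arch.vcpus_running) in
 * kvmppc_vcpu_run_hv(): a vcpu that sees rma_setup_done set is
 * guaranteed to also see the finished HPT and LPCR updates, while
 * kvmppc_alloc_reset_hpt() can use vcpus_running to tell whether any
 * vcpu might still be relying on the current HPT.
 */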

static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
	unsigned long lpcr, lpid;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 */
	cpumask_setall(&kvm->arch.need_tlb_flush);

	/* Start out with the default set of hcalls enabled */
	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
	       sizeof(kvm->arch.enabled_hcalls));

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		/* On POWER8 turn on online bit to enable PURR/SPURR */
		if (cpu_has_feature(CPU_FTR_ARCH_207S))
			lpcr |= LPCR_ONL;
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

	/*
	 * Track that we now have a HV mode VM active. This blocks secondary
	 * CPU threads from coming online.
	 */
	kvm_hv_vm_activated();

	return 0;
}

static void kvmppc_free_vcores(struct kvm *kvm)
{
	long int i;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
			struct kvmppc_vcore *vc = kvm->arch.vcores[i];
			free_pages((unsigned long)vc->mpp_buffer,
				   MPP_BUFFER_ORDER);
		}
		kfree(kvm->arch.vcores[i]);
	}
	kvm->arch.online_vcores = 0;
}

static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
	kvm_hv_vm_deactivated();

	kvmppc_free_vcores(kvm);
	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
}

/* We don't need to emulate any privileged instructions or dcbz */
static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_check_processor_compat_hv(void)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_hv(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {

	case KVM_ALLOCATE_RMA: {
		struct kvm_allocate_rma rma;
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}

	default:
		r = -ENOTTY;
	}

	return r;
}

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added.  Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_REMOVE,
	H_ENTER,
	H_READ,
	H_PROTECT,
	H_BULK_REMOVE,
	H_GET_TCE,
	H_PUT_TCE,
	H_SET_DABR,
	H_SET_XDABR,
	H_CEDE,
	H_PROD,
	H_CONFER,
	H_REGISTER_VPA,
#ifdef CONFIG_KVM_XICS
	H_EOI,
	H_CPPR,
	H_IPI,
	H_IPOLL,
	H_XIRR,
	H_XIRR_X,
#endif
	0
};

static void init_default_hcalls(void)
{
	int i;
	unsigned int hcall;

	for (i = 0; default_hcall_list[i]; ++i) {
		hcall = default_hcall_list[i];
		WARN_ON(!kvmppc_hcall_impl_hv(hcall));
		__set_bit(hcall / 4, default_enabled_hcalls);
	}
}
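
/*
 * PAPR hcall numbers are all multiples of 4, which is why the bitmap
 * is indexed by hcall / 4 and why default_enabled_hcalls is declared
 * with MAX_HCALL_OPCODE / 4 + 1 bits.  Userspace can enable further
 * hcalls, or disable these defaults, per-VM through the
 * KVM_CAP_PPC_ENABLE_HCALL capability.
 */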

static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
	.get_one_reg = kvmppc_get_one_reg_hv,
	.set_one_reg = kvmppc_set_one_reg_hv,
	.vcpu_load = kvmppc_core_vcpu_load_hv,
	.vcpu_put = kvmppc_core_vcpu_put_hv,
	.set_msr = kvmppc_set_msr_hv,
	.vcpu_run = kvmppc_vcpu_run_hv,
	.vcpu_create = kvmppc_core_vcpu_create_hv,
	.vcpu_free = kvmppc_core_vcpu_free_hv,
	.check_requests = kvmppc_core_check_requests_hv,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
	.flush_memslot = kvmppc_core_flush_memslot_hv,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
	.unmap_hva = kvm_unmap_hva_hv,
	.unmap_hva_range = kvm_unmap_hva_range_hv,
	.age_hva = kvm_age_hva_hv,
	.test_age_hva = kvm_test_age_hva_hv,
	.set_spte_hva = kvm_set_spte_hva_hv,
	.mmu_destroy = kvmppc_mmu_destroy_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.create_memslot = kvmppc_core_create_memslot_hv,
	.init_vm = kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
	.emulate_op = kvmppc_core_emulate_op_hv,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
	.hcall_implemented = kvmppc_hcall_impl_hv,
};

static int kvmppc_book3s_init_hv(void)
{
	int r;
	/*
	 * FIXME!! Do we need to check on all cpus?
	 */
	r = kvmppc_core_check_processor_compat_hv();
	if (r < 0)
		return -ENODEV;

	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;

	init_default_hcalls();

	r = kvmppc_mmu_hv_init();
	return r;
}

static void kvmppc_book3s_exit_hv(void)
{
	kvmppc_hv_ops = NULL;
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");