/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include "book3s.h"
#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

#if defined(CONFIG_PPC_64K_PAGES)
#define MPP_BUFFER_ORDER	0
#elif defined(CONFIG_PPC_4K_PAGES)
#define MPP_BUFFER_ORDER	3
#endif
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
static bool kvmppc_ipi_thread(int cpu)
{
	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids && paca[cpu].kvm_hstate.xics_phys) {
		xics_wake_cpu(cpu);
		return true;
	}
#endif

	return false;
}
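/*
 * Kick a vcpu: wake its task if it is sleeping in KVM_VCPU_RUN, and
 * if it is currently running in the guest, poke the hardware thread
 * it is on with an IPI (msgsnd on POWER8, an XICS IPI otherwise).
 */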
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	wait_queue_head_t *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (waitqueue_active(wqp)) {
		wake_up_interruptible(wqp);
		++vcpu->stat.halt_wakeup;
	}
	cpu = vcpu->cpu;
	if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
		return;

	/* CPU points to the first thread of the core */
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}
/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock.
 * The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
		spin_lock_irqsave(&vc->stoltb_lock, flags);
		if (vc->preempt_tb != TB_NIL) {
			vc->stolen_tb += mftb() - vc->preempt_tb;
			vc->preempt_tb = TB_NIL;
		}
		spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	}
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
		spin_lock_irqsave(&vc->stoltb_lock, flags);
		vc->preempt_tb = mftb();
		spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	}
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}
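/*
 * Set the guest's architecture compatibility level, using the PCR
 * (Processor Compatibility Register) to make the core behave like
 * an earlier architecture version for the guest where requested.
 */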
int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			/*
			 * If an arch bit is set in PCR, all the defined
			 * higher-order arch bits also have to be set.
			 */
			pcr = PCR_ARCH_206 | PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			pcr = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			break;
		default:
			return -EINVAL;
		}

		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
			/* POWER7 can't emulate POWER8 */
			if (!(pcr & PCR_ARCH_206))
				return -EINVAL;
			pcr &= ~PCR_ARCH_206;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}
struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}
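/*
 * Handle the H_REGISTER_VPA hcall: register or deregister the virtual
 * processor area (VPA), dispatch trace log (DTL) or SLB shadow buffer
 * that the guest has set up in its memory, according to the
 * subfunction encoded in the flags argument.
 */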
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}
/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}
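/*
 * Write an entry into the guest's dispatch trace log, recording the
 * dispatch timebase and the stolen time accumulated since the vcpu
 * was last dispatched, and advance the guest-visible dtl_idx.
 */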
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	default:
		return H_TOO_HARD;
	}
}
static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}
static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}
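/*
 * Handle hypercalls that the real-mode handlers have passed up to the
 * host kernel.  Returns RESUME_GUEST if the hcall was handled here,
 * or RESUME_HOST to punt it out to userspace.
 */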
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		} /* fallthrough */
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}
static int kvmppc_emulate_debug_inst(struct kvm_run *run,
				     struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
					EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercalls with MSR_PR set have already been handled
		 * in real mode and never reach here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If guest debug is disabled, generate a program interrupt
	 * to the guest.  If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to guest or host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(run, vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	/*
	 * This occurs if the guest (kernel or userspace) does something that
	 * is prohibited by HFSCR.  We just generate a program interrupt to
	 * the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}
static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
	mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
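/*
 * Set one register for the KVM_SET_ONE_REG ioctl.  Values that would
 * let the guest breakpoint or trap into the hypervisor (CIABR priv
 * field, DABRX/DAWRX hypervisor bits) are sanitized before being
 * written.
 */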
static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		vcpu->arch.spmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIER:
		vcpu->arch.sier = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAMR:
		vcpu->arch.iamr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSPB:
		vcpu->arch.pspb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DPDES:
		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWR:
		vcpu->arch.dawr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWRX:
		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
		break;
	case KVM_REG_PPC_CIABR:
		vcpu->arch.ciabr = set_reg_val(id, *val);
		/* Don't allow setting breakpoints in hypervisor code */
		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
		break;
	case KVM_REG_PPC_CSIGR:
		vcpu->arch.csigr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TACR:
		vcpu->arch.tacr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TCSCR:
		vcpu->arch.tcscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PID:
		vcpu->arch.pid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ACOP:
		vcpu->arch.acop = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_WORT:
		vcpu->arch.wort = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
		break;
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
	struct kvmppc_vcore *vcore;

	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);

	if (vcore == NULL)
		return NULL;

	INIT_LIST_HEAD(&vcore->runnable_threads);
	spin_lock_init(&vcore->lock);
	spin_lock_init(&vcore->stoltb_lock);
	init_waitqueue_head(&vcore->wq);
	vcore->preempt_tb = TB_NIL;
	vcore->lpcr = kvm->arch.lpcr;
	vcore->first_vcpuid = core * threads_per_subcore;
	vcore->kvm = kvm;

	vcore->mpp_buffer_is_valid = false;

	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcore->mpp_buffer = (void *)__get_free_pages(
			GFP_KERNEL|__GFP_ZERO,
			MPP_BUFFER_ORDER);

	return vcore;
}
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
static struct debugfs_timings_element {
	const char *name;
	size_t offset;
} timings[] = {
	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
};

#define N_TIMINGS	(sizeof(timings) / sizeof(timings[0]))

struct debugfs_timings_state {
	struct kvm_vcpu	*vcpu;
	unsigned int	buflen;
	char		buf[N_TIMINGS * 100];
};
static int debugfs_timings_open(struct inode *inode, struct file *file)
{
	struct kvm_vcpu *vcpu = inode->i_private;
	struct debugfs_timings_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(vcpu->kvm);
	p->vcpu = vcpu;
	file->private_data = p;

	return nonseekable_open(inode, file);
}
static int debugfs_timings_release(struct inode *inode, struct file *file)
{
	struct debugfs_timings_state *p = file->private_data;

	kvm_put_kvm(p->vcpu->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
				    size_t len, loff_t *ppos)
{
	struct debugfs_timings_state *p = file->private_data;
	struct kvm_vcpu *vcpu = p->vcpu;
	char *s, *buf_end;
	struct kvmhv_tb_accumulator tb;
	u64 count;
	loff_t pos;
	ssize_t n;
	int i, loops;
	bool ok;

	if (!p->buflen) {
		s = p->buf;
		buf_end = s + sizeof(p->buf);
		for (i = 0; i < N_TIMINGS; ++i) {
			struct kvmhv_tb_accumulator *acc;

			acc = (struct kvmhv_tb_accumulator *)
				((unsigned long)vcpu + timings[i].offset);
			ok = false;
			for (loops = 0; loops < 1000; ++loops) {
				count = acc->seqcount;
				if (!(count & 1)) {
					smp_rmb();
					tb = *acc;
					smp_rmb();
					if (count == acc->seqcount) {
						ok = true;
						break;
					}
				}
				udelay(1);
			}
			if (!ok)
				snprintf(s, buf_end - s, "%s: stuck\n",
					timings[i].name);
			else
				snprintf(s, buf_end - s,
					"%s: %llu %llu %llu %llu\n",
					timings[i].name, count / 2,
					tb_to_ns(tb.tb_total),
					tb_to_ns(tb.tb_min),
					tb_to_ns(tb.tb_max));
			s += strlen(s);
		}
		p->buflen = s - p->buf;
	}

	pos = *ppos;
	if (pos >= p->buflen)
		return 0;
	if (len > p->buflen - pos)
		len = p->buflen - pos;
	n = copy_to_user(buf, p->buf + pos, len);
	if (n) {
		if (n == len)
			return -EFAULT;
		len -= n;
	}
	*ppos = pos + len;
	return len;
}
static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
				     size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_timings_ops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_timings_open,
	.release = debugfs_timings_release,
	.read	 = debugfs_timings_read,
	.write	 = debugfs_timings_write,
	.llseek	 = generic_file_llseek,
};
/* Create a debugfs directory for the vcpu */
static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
{
	char buf[16];
	struct kvm *kvm = vcpu->kvm;

	snprintf(buf, sizeof(buf), "vcpu%u", id);
	if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
		return;
	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
	if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
		return;
	vcpu->arch.debugfs_timings =
		debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
				    vcpu, &debugfs_timings_ops);
}

#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
{
}
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
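/*
 * Create and initialize a vcpu.  Vcpus are grouped into virtual cores
 * (vcores) of up to threads_per_subcore vcpus that are run together
 * on the threads of one physical core; the vcore for this vcpu is
 * created here on first use.
 */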
static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
						   unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_subcore;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/*
	 * The shared struct is never shared on HV,
	 * so we can always use host endianness
	 */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif
#endif
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;
	vcpu->arch.intr_msr = MSR_SF | MSR_ME;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kvmppc_vcore_create(kvm, core);
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	err = -ENOMEM;
	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	debugfs_vcpu_init(vcpu, id);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
	/* Indicate we want to get back into the guest */
	return 1;
}
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
extern void __kvmppc_vcore_entry(void);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 10000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.kvm_vcpu = NULL;
	tpaca->kvm_hstate.napping = 0;
	smp_wmb();
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}
static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
	vcpu->cpu = vc->pcpu;
	/* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
	smp_wmb();
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	if (cpu != smp_processor_id())
		kvmppc_ipi_thread(cpu);
}
static void kvmppc_wait_for_nap(void)
{
	int cpu = smp_processor_id();
	int i, loops;

	for (loops = 0; loops < 1000000; ++loops) {
		/*
		 * Check if all threads are finished.
		 * We set the vcpu pointer when starting a thread
		 * and the thread clears it when finished, so we look
		 * for any threads that still have a non-NULL vcpu ptr.
		 */
		for (i = 1; i < threads_per_subcore; ++i)
			if (paca[cpu + i].kvm_hstate.kvm_vcpu)
				break;
		if (i == threads_per_subcore) {
			HMT_medium();
			return;
		}
		HMT_low();
	}
	HMT_medium();
	for (i = 1; i < threads_per_subcore; ++i)
		if (paca[cpu + i].kvm_hstate.kvm_vcpu)
			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}
/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr;

	/* Are we on a primary subcore? */
	if (cpu_thread_in_subcore(cpu))
		return 0;

	thr = 0;
	while (++thr < threads_per_subcore)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_subcore; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}
static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
{
	phys_addr_t phy_addr, mpp_addr;

	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;

	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);

	vc->mpp_buffer_is_valid = true;
}

static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
{
	phys_addr_t phy_addr, mpp_addr;

	phy_addr = virt_to_phys(vc->mpp_buffer);
	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;

	/* We must abort any in-progress save operations to ensure
	 * the table is valid so that prefetch engine knows when to
	 * stop prefetching. */
	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
}
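/*
 * Scan the runnable list and drop any vcpus that have a signal
 * pending or still need a VPA update; their tasks get woken up and
 * complete the exit instead of entering the guest.
 */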
static void prepare_threads(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vnext;

	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			vcpu->arch.ret = -EINTR;
		else if (vcpu->arch.vpa.update_pending ||
			 vcpu->arch.slb_shadow.update_pending ||
			 vcpu->arch.dtl.update_pending)
			vcpu->arch.ret = RESUME_GUEST;
		else
			continue;
		kvmppc_remove_runnable(vc, vcpu);
		wake_up(&vcpu->arch.cpu_run);
	}
}
static void post_guest_process(struct kvmppc_vcore *vc)
{
	u64 now;
	long ret;
	struct kvm_vcpu *vcpu, *vnext;

	now = get_tb();
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		trace_kvm_guest_exit(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (!is_kvmppc_resume_guest(ret))
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu;
	int i;
	int srcu_idx;

	/*
	 * Remove from the list any threads that have a signal pending
	 * or need a VPA update done
	 */
	prepare_threads(vc);

	/* if the runner is no longer runnable, let the caller pick a new one */
	if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;

	/*
	 * Initialize *vc.
	 */
	vc->entry_exit_map = 0;
	vc->preempt_tb = TB_NIL;
	vc->in_guest = 0;
	vc->napping_threads = 0;
	vc->conferring_threads = 0;

	/*
	 * Make sure we are running on primary threads, and that secondary
	 * threads are offline.  Also check if the number of threads in this
	 * guest are greater than the current system threads per guest.
	 */
	if ((threads_per_core > 1) &&
	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
			vcpu->arch.ret = -EBUSY;
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
		trace_kvm_guest_enter(vcpu);
	}

	/* Set this explicitly in case thread 0 doesn't have a vcpu */
	get_paca()->kvm_hstate.kvm_vcore = vc;
	get_paca()->kvm_hstate.ptid = 0;

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();

	trace_kvmppc_run_core(vc, 0);

	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vc->kvm->srcu);

	if (vc->mpp_buffer_is_valid)
		kvmppc_start_restoring_l2_cache(vc);

	__kvmppc_vcore_entry();

	spin_lock(&vc->lock);

	if (vc->mpp_buffer)
		kvmppc_start_saving_l2_cache(vc);

	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	kvmppc_wait_for_nap();
	for (i = 0; i < threads_per_subcore; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();

	spin_lock(&vc->lock);
	post_guest_process(vc);

 out:
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_run_core(vc, 1);
}
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}
/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu;
	int do_sleep = 1;

	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);

	/*
	 * Check one last time for pending exceptions and ceded state after
	 * we put ourselves on the wait queue
	 */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
			do_sleep = 0;
			break;
		}
	}

	if (!do_sleep) {
		finish_wait(&vc->wq, &wait);
		return;
	}

	vc->vcore_state = VCORE_SLEEPING;
	trace_kvmppc_vcore_blocked(vc, 0);
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_vcore_blocked(vc, 1);
}
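/*
 * Run a vcpu on behalf of the task that issued KVM_RUN.  The vcpu is
 * added to its vcore's runnable list; one of the runnable vcpu tasks
 * becomes the "runner" and executes kvmppc_run_core for the whole
 * vcore while the other vcpu tasks sleep waiting for it.
 */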
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	trace_kvmppc_run_vcpu_enter(vcpu);

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
			trace_kvm_guest_enter(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}

	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
		vc->runner = vcpu;
		if (n_ceded == vc->n_runnable) {
			kvmppc_vcore_blocked(vc);
		} else if (should_resched()) {
			vc->vcore_state = VCORE_PREEMPT;
			/* Let something else run */
			cond_resched_lock(&vc->lock);
			vc->vcore_state = VCORE_INACTIVE;
		} else {
			kvmppc_run_core(vc);
		}
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
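/*
 * Top-level entry point for running a vcpu under Book3S HV: sets up
 * the hashed page table and VRMA on first use, then loops running
 * the vcpu and handling hcalls and page faults in the host until the
 * exit has to be delivered to userspace.
 */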
static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA */
	if (!vcpu->kvm->arch.hpte_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			trace_kvm_hcall_enter(vcpu);
			r = kvmppc_pseries_do_hcall(vcpu);
			trace_kvm_hcall_exit(vcpu, r);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (is_kvmppc_resume_guest(r));

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
	/*
	 * Add 16MB MPSS support if host supports it
	 */
	if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
		(*sps)->enc[1].page_shift = 24;
		(*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
	}
	(*sps)++;
}
static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}
static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
					struct kvm_userspace_memory_region *mem,
					const struct kvm_memory_slot *old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old->npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}
2400 /*
2401 * Update LPCR values in kvm->arch and in vcores.
2402 * Caller must hold kvm->lock.
2403 */
2404 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
2405 {
2406 long int i;
2407 u32 cores_done = 0;
2409 if ((kvm->arch.lpcr & mask) == lpcr)
2410 return;
2412 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
2414 for (i = 0; i < KVM_MAX_VCORES; ++i) {
2415 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2416 if (!vc)
2417 continue;
2418 spin_lock(&vc->lock);
2419 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
2420 spin_unlock(&vc->lock);
2421 if (++cores_done >= kvm->arch.online_vcores)
2422 break;
2423 }
2424 }
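/*
 * Usage sketch: callers pass the new value and a mask covering only the
 * field being changed, e.g. the VRMA setup below does
 *
 *	kvmppc_update_lpcr(kvm, senc << (LPCR_VRMASD_SH - 4), LPCR_VRMASD);
 *
 * An illustrative (not taken from this file) single-bit update would be
 *
 *	kvmppc_update_lpcr(kvm, LPCR_ILE, LPCR_ILE);
 *
 * to set the guest interrupt-little-endian bit in all vcores.
 */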
2426 static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
2427 {
2428 }
2431 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
2432 {
2433 int err = 0;
2434 struct kvm *kvm = vcpu->kvm;
2435 unsigned long hva;
2436 struct kvm_memory_slot *memslot;
2437 struct vm_area_struct *vma;
2438 unsigned long lpcr = 0, senc;
2439 unsigned long psize, porder;
2440 int srcu_idx;
2442 mutex_lock(&kvm->lock);
2443 if (kvm->arch.hpte_setup_done)
2444 goto out; /* another vcpu beat us to it */
2446 /* Allocate hashed page table (if not done already) and reset it */
2447 if (!kvm->arch.hpt_virt) {
2448 err = kvmppc_alloc_hpt(kvm, NULL);
2449 if (err) {
2450 pr_err("KVM: Couldn't alloc HPT\n");
2451 goto out;
2452 }
2453 }
2455 /* Look up the memslot for guest physical address 0 */
2456 srcu_idx = srcu_read_lock(&kvm->srcu);
2457 memslot = gfn_to_memslot(kvm, 0);
2459 /* We must have some memory at 0 by now */
2460 err = -EINVAL;
2461 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
2462 goto out_srcu;
2464 /* Look up the VMA for the start of this memory slot */
2465 hva = memslot->userspace_addr;
2466 down_read(&current->mm->mmap_sem);
2467 vma = find_vma(current->mm, hva);
2468 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
2469 goto up_out;
2471 psize = vma_kernel_pagesize(vma);
2472 porder = __ilog2(psize);
2474 up_read(&current->mm->mmap_sem);
2476 /* We can handle 4k, 64k or 16M pages in the VRMA */
2477 err = -EINVAL;
2478 if (!(psize == 0x1000 || psize == 0x10000 ||
2479 psize == 0x1000000))
2480 goto out_srcu;
2482 /* Update VRMASD field in the LPCR */
2483 senc = slb_pgsize_encoding(psize);
2484 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
2485 (VRMA_VSID << SLB_VSID_SHIFT_1T);
2486 /* the -4 is to account for senc values starting at 0x10 */
2487 lpcr = senc << (LPCR_VRMASD_SH - 4);
2489 /* Create HPTEs in the hash page table for the VRMA */
2490 kvmppc_map_vrma(vcpu, memslot, porder);
2492 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
2494 /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
2495 smp_wmb();
2496 kvm->arch.hpte_setup_done = 1;
2497 err = 0;
2498 out_srcu:
2499 srcu_read_unlock(&kvm->srcu, srcu_idx);
2500 out:
2501 mutex_unlock(&kvm->lock);
2502 return err;
2504 up_out:
2505 up_read(&current->mm->mmap_sem);
2506 goto out_srcu;
2507 }
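/*
 * Worked example for the size check above: a VRMA backed by 64k pages
 * has psize == 0x10000, so porder == __ilog2(psize) == 16; 4k and 16M
 * backings give porder 12 and 24 respectively, and anything else is
 * rejected with -EINVAL before the SLB encoding is computed.
 */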
2509 static int kvmppc_core_init_vm_hv(struct kvm *kvm)
2510 {
2511 unsigned long lpcr, lpid;
2512 char buf[32];
2514 /* Allocate the guest's logical partition ID */
2516 lpid = kvmppc_alloc_lpid();
2517 if ((long)lpid < 0)
2518 return -ENOMEM;
2519 kvm->arch.lpid = lpid;
2521 /*
2522 * Since we don't flush the TLB when tearing down a VM,
2523 * and this lpid might have previously been used,
2524 * make sure we flush on each core before running the new VM.
2525 */
2526 cpumask_setall(&kvm->arch.need_tlb_flush);
2528 /* Start out with the default set of hcalls enabled */
2529 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
2530 sizeof(kvm->arch.enabled_hcalls));
2532 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
2534 /* Init LPCR for virtual RMA mode */
2535 kvm->arch.host_lpid = mfspr(SPRN_LPID);
2536 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
2537 lpcr &= LPCR_PECE | LPCR_LPES;
2538 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
2539 LPCR_VPM0 | LPCR_VPM1;
2540 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
2541 (VRMA_VSID << SLB_VSID_SHIFT_1T);
2542 /* On POWER8 turn on online bit to enable PURR/SPURR */
2543 if (cpu_has_feature(CPU_FTR_ARCH_207S))
2544 lpcr |= LPCR_ONL;
2545 kvm->arch.lpcr = lpcr;
2547 /*
2548 * Track that we now have an HV mode VM active. This blocks secondary
2549 * CPU threads from coming online.
2550 */
2551 kvm_hv_vm_activated();
2553 /*
2554 * Create a debugfs directory for the VM
2555 */
2556 snprintf(buf, sizeof(buf), "vm%d", current->pid);
2557 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
2558 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
2559 kvmppc_mmu_debugfs_init(kvm);
2561 return 0;
2562 }
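/*
 * Illustrative note (path assumes the usual debugfs mount point): for a
 * VM whose creating task has pid 4242, the directory created above
 * appears as /sys/kernel/debug/kvm/vm4242/, and
 * kvmppc_mmu_debugfs_init() populates it with MMU state for debugging.
 */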
2564 static void kvmppc_free_vcores(struct kvm *kvm)
2565 {
2566 long int i;
2568 for (i = 0; i < KVM_MAX_VCORES; ++i) {
2569 if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
2570 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
2571 free_pages((unsigned long)vc->mpp_buffer,
2572 MPP_BUFFER_ORDER);
2573 }
2574 kfree(kvm->arch.vcores[i]);
2575 }
2576 kvm->arch.online_vcores = 0;
2577 }
2579 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
2580 {
2581 debugfs_remove_recursive(kvm->arch.debugfs_dir);
2583 kvm_hv_vm_deactivated();
2585 kvmppc_free_vcores(kvm);
2587 kvmppc_free_hpt(kvm);
2588 }
2590 /* We don't need to emulate any privileged instructions or dcbz */
2591 static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
2592 unsigned int inst, int *advance)
2593 {
2594 return EMULATE_FAIL;
2595 }
2597 static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
2598 ulong spr_val)
2599 {
2600 return EMULATE_FAIL;
2601 }
2603 static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
2604 ulong *spr_val)
2605 {
2606 return EMULATE_FAIL;
2607 }
2609 static int kvmppc_core_check_processor_compat_hv(void)
2610 {
2611 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
2612 !cpu_has_feature(CPU_FTR_ARCH_206))
2613 return -EIO;
2614 return 0;
2615 }
2617 static long kvm_arch_vm_ioctl_hv(struct file *filp,
2618 unsigned int ioctl, unsigned long arg)
2619 {
2620 struct kvm *kvm __maybe_unused = filp->private_data;
2621 void __user *argp = (void __user *)arg;
2622 long r;
2624 switch (ioctl) {
2626 case KVM_PPC_ALLOCATE_HTAB: {
2627 u32 htab_order;
2629 r = -EFAULT;
2630 if (get_user(htab_order, (u32 __user *)argp))
2631 break;
2632 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
2633 if (r)
2634 break;
2635 r = -EFAULT;
2636 if (put_user(htab_order, (u32 __user *)argp))
2637 break;
2638 r = 0;
2639 break;
2640 }
2642 case KVM_PPC_GET_HTAB_FD: {
2643 struct kvm_get_htab_fd ghf;
2645 r = -EFAULT;
2646 if (copy_from_user(&ghf, argp, sizeof(ghf)))
2647 break;
2648 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
2649 break;
2650 }
2652 default:
2653 r = -ENOTTY;
2654 }
2656 return r;
2657 }
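/*
 * Illustrative sketch (not part of this file): userspace drives the two
 * ioctls above as shown below; vm_fd is an assumed name.  For
 * KVM_PPC_ALLOCATE_HTAB the order is both input (requested size) and
 * output (what was actually allocated):
 *
 *	__u32 order = 24;	(request a 2^24-byte, i.e. 16MB, HPT)
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int htab_fd;
 *
 *	if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) == 0)
 *		htab_fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *
 * after which read() on htab_fd streams HPT entries, e.g. for live
 * migration.
 */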
2659 /*
2660 * List of hcall numbers to enable by default.
2661 * For compatibility with old userspace, we enable by default
2662 * all hcalls that were implemented before the hcall-enabling
2663 * facility was added. Note this list should not include H_RTAS.
2664 */
2665 static unsigned int default_hcall_list[] = {
2666 H_REMOVE,
2667 H_ENTER,
2668 H_READ,
2669 H_PROTECT,
2670 H_BULK_REMOVE,
2671 H_GET_TCE,
2672 H_PUT_TCE,
2673 H_SET_DABR,
2674 H_SET_XDABR,
2675 H_CEDE,
2676 H_PROD,
2677 H_CONFER,
2678 H_REGISTER_VPA,
2679 #ifdef CONFIG_KVM_XICS
2680 H_EOI,
2681 H_CPPR,
2682 H_IPI,
2683 H_IPOLL,
2684 H_XIRR,
2685 H_XIRR_X,
2686 #endif
2687 0
2688 };
2690 static void init_default_hcalls(void)
2691 {
2692 int i;
2693 unsigned int hcall;
2695 for (i = 0; default_hcall_list[i]; ++i) {
2696 hcall = default_hcall_list[i];
2697 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
2698 __set_bit(hcall / 4, default_enabled_hcalls);
2699 }
2700 }
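/*
 * Illustrative sketch (not part of this file): hcall numbers are
 * multiples of 4, so hcall / 4 above indexes one bit per hcall
 * (H_ENTER == 0x08 lands in bit 2).  Userspace can toggle a single
 * hcall with the KVM_CAP_PPC_ENABLE_HCALL vm capability; vm_fd and
 * hcall_nr are assumed names:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_ENABLE_HCALL,
 *		.args = { hcall_nr, 1 },	(1 = enable, 0 = disable)
 *	};
 *
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */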
2702 static struct kvmppc_ops kvm_ops_hv = {
2703 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
2704 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
2705 .get_one_reg = kvmppc_get_one_reg_hv,
2706 .set_one_reg = kvmppc_set_one_reg_hv,
2707 .vcpu_load = kvmppc_core_vcpu_load_hv,
2708 .vcpu_put = kvmppc_core_vcpu_put_hv,
2709 .set_msr = kvmppc_set_msr_hv,
2710 .vcpu_run = kvmppc_vcpu_run_hv,
2711 .vcpu_create = kvmppc_core_vcpu_create_hv,
2712 .vcpu_free = kvmppc_core_vcpu_free_hv,
2713 .check_requests = kvmppc_core_check_requests_hv,
2714 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
2715 .flush_memslot = kvmppc_core_flush_memslot_hv,
2716 .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
2717 .commit_memory_region = kvmppc_core_commit_memory_region_hv,
2718 .unmap_hva = kvm_unmap_hva_hv,
2719 .unmap_hva_range = kvm_unmap_hva_range_hv,
2720 .age_hva = kvm_age_hva_hv,
2721 .test_age_hva = kvm_test_age_hva_hv,
2722 .set_spte_hva = kvm_set_spte_hva_hv,
2723 .mmu_destroy = kvmppc_mmu_destroy_hv,
2724 .free_memslot = kvmppc_core_free_memslot_hv,
2725 .create_memslot = kvmppc_core_create_memslot_hv,
2726 .init_vm = kvmppc_core_init_vm_hv,
2727 .destroy_vm = kvmppc_core_destroy_vm_hv,
2728 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
2729 .emulate_op = kvmppc_core_emulate_op_hv,
2730 .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
2731 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
2732 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
2733 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
2734 .hcall_implemented = kvmppc_hcall_impl_hv,
2735 };
2737 static int kvmppc_book3s_init_hv(void)
2738 {
2739 int r;
2740 /*
2741 * FIXME!! Do we need to check on all cpus?
2742 */
2743 r = kvmppc_core_check_processor_compat_hv();
2744 if (r < 0)
2745 return -ENODEV;
2747 kvm_ops_hv.owner = THIS_MODULE;
2748 kvmppc_hv_ops = &kvm_ops_hv;
2750 init_default_hcalls();
2752 r = kvmppc_mmu_hv_init();
2753 return r;
2754 }
2756 static void kvmppc_book3s_exit_hv(void)
2757 {
2758 kvmppc_hv_ops = NULL;
2759 }
2761 module_init(kvmppc_book3s_init_hv);
2762 module_exit(kvmppc_book3s_exit_hv);
2763 MODULE_LICENSE("GPL");
2764 MODULE_ALIAS_MISCDEV(KVM_MINOR);
2765 MODULE_ALIAS("devname:kvm");