KVM: arm/arm64: implement kvm_arm_[halt,resume]_guest
authorEric Auger <eric.auger@linaro.org>
Fri, 25 Sep 2015 21:41:17 +0000 (23:41 +0200)
committerChristoffer Dall <christoffer.dall@linaro.org>
Thu, 22 Oct 2015 21:01:46 +0000 (23:01 +0200)
We introduce the kvm_arm_halt_guest and kvm_arm_resume_guest functions.
They will be used for IRQ forward state changes.

Halt is synchronous and prevents the guest from being re-entered.
We use the same mechanism put in place for the former PSCI pause,
now renamed power_off. A new flag, pause, is introduced in the arch
vcpu state; it is only meant to be used by those functions.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
arch/arm/include/asm/kvm_host.h
arch/arm/kvm/arm.c
arch/arm64/include/asm/kvm_host.h

index 107374f986fdcae4882bc7e8092a098e82fd9100..6692982c9b575db476bc12a37e2d69b27c00d9fb 100644 (file)
@@ -129,6 +129,9 @@ struct kvm_vcpu_arch {
        /* vcpu power-off state */
        bool power_off;
 
+       /* Don't run the guest (internal implementation need) */
+       bool pause;
+
        /* IO related fields */
        struct kvm_decode mmio_decode;
 
index 3b3384c37e2412a0608eebf3fb5a7caffe209666..ed1574724caff7c29560007efcd6ad8f34cbb3bc 100644 (file)
@@ -353,7 +353,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
        return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
-               && !v->arch.power_off);
+               && !v->arch.power_off && !v->arch.pause);
 }
 
 /* Just ensure a guest exit from a particular CPU */
@@ -479,11 +479,38 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
        return vgic_initialized(kvm);
 }
 
+static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
+static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
+
+static void kvm_arm_halt_guest(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.pause = true;
+       force_vm_exit(cpu_all_mask);
+}
+
+static void kvm_arm_resume_guest(struct kvm *kvm)
+{
+       int i;
+       struct kvm_vcpu *vcpu;
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+               vcpu->arch.pause = false;
+               wake_up_interruptible(wq);
+       }
+}
+
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
 {
        wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
 
-       wait_event_interruptible(*wq, !vcpu->arch.power_off);
+       wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+                                      (!vcpu->arch.pause)));
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -533,7 +560,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
                update_vttbr(vcpu->kvm);
 
-               if (vcpu->arch.power_off)
+               if (vcpu->arch.power_off || vcpu->arch.pause)
                        vcpu_sleep(vcpu);
 
                /*
@@ -561,7 +588,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                }
 
                if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
-                       vcpu->arch.power_off) {
+                       vcpu->arch.power_off || vcpu->arch.pause) {
                        local_irq_enable();
                        kvm_timer_sync_hwstate(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
index 4b4157b3b9833d95407c0e26f9640e592e332efd..a35ce7266aac3688fa6460bace61f90477448aa6 100644 (file)
@@ -152,6 +152,9 @@ struct kvm_vcpu_arch {
        /* vcpu power-off state */
        bool power_off;
 
+       /* Don't run the guest (internal implementation need) */
+       bool pause;
+
        /* IO related fields */
        struct kvm_decode mmio_decode;