KVM: s390: factor out and fix setting of guest TOD clock
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>
Tue, 12 May 2015 07:49:14 +0000 (09:49 +0200)
committerChristian Borntraeger <borntraeger@de.ibm.com>
Tue, 13 Oct 2015 13:50:35 +0000 (15:50 +0200)
Let's move that whole logic into one function. We now always use unsigned
values when calculating the epoch (since over/underflow is well defined
only for unsigned arithmetic).
Also, we always have to get all VCPUs out of SIE before doing the update
to avoid running differing VCPUs with different TODs.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c

index a0907795f31db30d8d5a45682f4fad5977c55c6c..87bd602f326c02053542a7544f3d5d171f97b14e 100644 (file)
@@ -521,22 +521,12 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-       struct kvm_vcpu *cur_vcpu;
-       unsigned int vcpu_idx;
        u64 gtod;
 
        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;
 
-       mutex_lock(&kvm->lock);
-       preempt_disable();
-       kvm->arch.epoch = gtod - get_tod_clock();
-       kvm_s390_vcpu_block_all(kvm);
-       kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
-               cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-       kvm_s390_vcpu_unblock_all(kvm);
-       preempt_enable();
-       mutex_unlock(&kvm->lock);
+       kvm_s390_set_tod_clock(kvm, gtod);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
        return 0;
 }
@@ -1906,6 +1896,22 @@ retry:
        return 0;
 }
 
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
+{
+       struct kvm_vcpu *vcpu;
+       int i;
+
+       mutex_lock(&kvm->lock);
+       preempt_disable();
+       kvm->arch.epoch = tod - get_tod_clock();
+       kvm_s390_vcpu_block_all(kvm);
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.sie_block->epoch = kvm->arch.epoch;
+       kvm_s390_vcpu_unblock_all(kvm);
+       preempt_enable();
+       mutex_unlock(&kvm->lock);
+}
+
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
index 3a368d2a6114bec97cfaf0afa7f23170ac4e1f8c..cc15ea3a150ec51dd24ad4252ae7e2d55e681c4c 100644 (file)
@@ -231,6 +231,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
 int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 
 /* implemented in kvm-s390.c */
+void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
index b253de5b894561a72c11ba4ac577049bcc60a7d6..77191b85ea7af4dd96dc6a1ae819f27faa25233b 100644 (file)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *cpup;
-       s64 val;
-       int i, rc;
+       int rc;
        ar_t ar;
-       u64 op2;
+       u64 op2, val;
 
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -50,14 +48,7 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
                return kvm_s390_inject_prog_cond(vcpu, rc);
 
        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-
-       mutex_lock(&vcpu->kvm->lock);
-       preempt_disable();
-       val = (val - get_tod_clock()) & ~0x3fUL;
-       kvm_for_each_vcpu(i, cpup, vcpu->kvm)
-               cpup->arch.sie_block->epoch = val;
-       preempt_enable();
-       mutex_unlock(&vcpu->kvm->lock);
+       kvm_s390_set_tod_clock(vcpu->kvm, val);
 
        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;