Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c13cd14c47809376656296b0e1ebe331d70b908a..0e27ee573bd52dec1b3801155c2f73289d5e42c6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -67,6 +67,8 @@
 #include <asm/pvclock.h>
 #include <asm/div64.h>
 #include <asm/irq_remapping.h>
+#include <asm/mshyperv.h>
+#include <asm/hypervisor.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -1381,6 +1383,11 @@ static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
        return tsc;
 }
 
+static inline int gtod_is_based_on_tsc(int mode)
+{
+       return mode == VCLOCK_TSC || mode == VCLOCK_HVCLOCK;
+}
+
 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_X86_64
@@ -1400,7 +1407,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
         * perform request to enable masterclock.
         */
        if (ka->use_master_clock ||
-           (gtod->clock.vclock_mode == VCLOCK_TSC && vcpus_matched))
+           (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
                kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
 
        trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
@@ -1463,6 +1470,19 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        vcpu->arch.tsc_offset = offset;
 }
 
+static inline bool kvm_check_tsc_unstable(void)
+{
+#ifdef CONFIG_X86_64
+       /*
+        * TSC is marked unstable when we're running on Hyper-V, but the
+        * 'TSC page' clocksource is still good.
+        */
+       if (pvclock_gtod_data.clock.vclock_mode == VCLOCK_HVCLOCK)
+               return false;
+#endif
+       return check_tsc_unstable();
+}
+
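
For context, the generic predicate being wrapped here is only a flag read: on Hyper-V the host kernel marks the raw TSC unstable (its frequency can change across migrations) even though TSC-page readings stay correct, so the VCLOCK_HVCLOCK override above is safe. A minimal sketch, assuming the arch/x86/kernel/tsc.c implementation of this era:

```c
/* Sketch of the wrapped helper: it reports a global flag and knows
 * nothing about the Hyper-V TSC page compensating for migrations. */
int check_tsc_unstable(void)
{
	return tsc_unstable;
}
```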
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1508,7 +1528,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
          */
        if (synchronizing &&
            vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
-               if (!check_tsc_unstable()) {
+               if (!kvm_check_tsc_unstable()) {
                        offset = kvm->arch.cur_tsc_offset;
                        pr_debug("kvm: matched tsc offset for %llu\n", data);
                } else {
@@ -1608,18 +1628,43 @@ static u64 read_tsc(void)
        return last;
 }
 
-static inline u64 vgettsc(u64 *cycle_now)
+static inline u64 vgettsc(u64 *tsc_timestamp, int *mode)
 {
        long v;
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+       u64 tsc_pg_val;
+
+       switch (gtod->clock.vclock_mode) {
+       case VCLOCK_HVCLOCK:
+               tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
+                                                 tsc_timestamp);
+               if (tsc_pg_val != U64_MAX) {
+                       /* TSC page valid */
+                       *mode = VCLOCK_HVCLOCK;
+                       v = (tsc_pg_val - gtod->clock.cycle_last) &
+                               gtod->clock.mask;
+               } else {
+                       /* TSC page invalid */
+                       *mode = VCLOCK_NONE;
+               }
+               break;
+       case VCLOCK_TSC:
+               *mode = VCLOCK_TSC;
+               *tsc_timestamp = read_tsc();
+               v = (*tsc_timestamp - gtod->clock.cycle_last) &
+                       gtod->clock.mask;
+               break;
+       default:
+               *mode = VCLOCK_NONE;
+       }
 
-       *cycle_now = read_tsc();
+       if (*mode == VCLOCK_NONE)
+               *tsc_timestamp = v = 0;
 
-       v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
        return v * gtod->clock.mult;
 }
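
The VCLOCK_HVCLOCK branch above leans on hv_read_tsc_page_tsc(), which reads the Hyper-V reference TSC page and also hands back the raw TSC value it used. A simplified sketch of the reader, assuming the asm/mshyperv.h implementation from the merged x86/hyperv branch (barrier and retry details abridged): the reference time is ((tsc * scale) >> 64) + offset, and U64_MAX signals an invalid page, which is exactly what the VCLOCK_NONE fallback above catches.

```c
/* Simplified sketch of the Hyper-V TSC page reader used by vgettsc();
 * see asm/mshyperv.h in the merged branch for the real implementation. */
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	u64 scale, offset;
	u32 sequence;

	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;	/* page invalid or being updated */

		smp_rmb();	/* read page contents after the sequence */
		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = rdtsc_ordered();
		smp_rmb();	/* re-check sequence after all other reads */
	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	/* reference count = ((tsc * scale) >> 64) + offset */
	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}
```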
 
-static int do_monotonic_boot(s64 *t, u64 *cycle_now)
+static int do_monotonic_boot(s64 *t, u64 *tsc_timestamp)
 {
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        unsigned long seq;
@@ -1628,9 +1673,8 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
 
        do {
                seq = read_seqcount_begin(&gtod->seq);
-               mode = gtod->clock.vclock_mode;
                ns = gtod->nsec_base;
-               ns += vgettsc(cycle_now);
+               ns += vgettsc(tsc_timestamp, &mode);
                ns >>= gtod->clock.shift;
                ns += gtod->boot_ns;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -1639,7 +1683,7 @@ static int do_monotonic_boot(s64 *t, u64 *cycle_now)
        return mode;
 }
 
-static int do_realtime(struct timespec *ts, u64 *cycle_now)
+static int do_realtime(struct timespec *ts, u64 *tsc_timestamp)
 {
        struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
        unsigned long seq;
@@ -1648,10 +1692,9 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now)
 
        do {
                seq = read_seqcount_begin(&gtod->seq);
-               mode = gtod->clock.vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->nsec_base;
-               ns += vgettsc(cycle_now);
+               ns += vgettsc(tsc_timestamp, &mode);
                ns >>= gtod->clock.shift;
        } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
 
@@ -1661,25 +1704,26 @@ static int do_realtime(struct timespec *ts, u64 *cycle_now)
        return mode;
 }
 
-/* returns true if host is using tsc clocksource */
-static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
+/* returns true if the host is using a TSC-based clocksource */
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
 {
        /* checked again under seqlock below */
-       if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+       if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
                return false;
 
-       return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
+       return gtod_is_based_on_tsc(do_monotonic_boot(kernel_ns,
+                                                     tsc_timestamp));
 }
 
-/* returns true if host is using tsc clocksource */
+/* returns true if the host is using a TSC-based clocksource */
 static bool kvm_get_walltime_and_clockread(struct timespec *ts,
-                                          u64 *cycle_now)
+                                          u64 *tsc_timestamp)
 {
        /* checked again under seqlock below */
-       if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
+       if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
                return false;
 
-       return do_realtime(ts, cycle_now) == VCLOCK_TSC;
+       return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
 }
 #endif
 
@@ -2892,13 +2936,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
        }
 
-       if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
+       if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
                s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
                                rdtsc() - vcpu->arch.last_host_tsc;
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
 
-               if (check_tsc_unstable()) {
+               if (kvm_check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_vcpu_write_tsc_offset(vcpu, offset);
@@ -4453,7 +4497,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
-               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+               trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
                handled += n;
                addr += n;
                len -= n;
@@ -4712,7 +4756,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
        if (vcpu->mmio_read_completed) {
                trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-                              vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+                              vcpu->mmio_fragments[0].gpa, val);
                vcpu->mmio_read_completed = 0;
                return 1;
        }
@@ -4734,14 +4778,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+       trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
        return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
                          void *val, int bytes)
 {
-       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+       trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
        return X86EMUL_IO_NEEDED;
 }
 
@@ -5959,6 +6003,43 @@ static void tsc_khz_changed(void *data)
        __this_cpu_write(cpu_tsc_khz, khz);
 }
 
+#ifdef CONFIG_X86_64
+static void kvm_hyperv_tsc_notifier(void)
+{
+       struct kvm *kvm;
+       struct kvm_vcpu *vcpu;
+       int cpu;
+
+       spin_lock(&kvm_lock);
+       list_for_each_entry(kvm, &vm_list, vm_list)
+               kvm_make_mclock_inprogress_request(kvm);
+
+       hyperv_stop_tsc_emulation();
+
+       /* TSC frequency always matches when on Hyper-V */
+       for_each_present_cpu(cpu)
+               per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
+       kvm_max_guest_tsc_khz = tsc_khz;
+
+       list_for_each_entry(kvm, &vm_list, vm_list) {
+               struct kvm_arch *ka = &kvm->arch;
+
+               spin_lock(&ka->pvclock_gtod_sync_lock);
+
+               pvclock_update_vm_gtod_copy(kvm);
+
+               kvm_for_each_vcpu(cpu, vcpu, kvm)
+                       kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+
+               kvm_for_each_vcpu(cpu, vcpu, kvm)
+                       kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
+
+               spin_unlock(&ka->pvclock_gtod_sync_lock);
+       }
+       spin_unlock(&kvm_lock);
+}
+#endif
+
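
kvm_hyperv_tsc_notifier() runs when the Hyper-V reenlightenment interrupt reports that the VM was migrated and the hypervisor is emulating the TSC at the old frequency. While all masterclock updates are held in-progress, hyperv_stop_tsc_emulation() asks the hypervisor to drop the emulation and refreshes tsc_khz; a simplified sketch of that helper, assuming the arch/x86/hyperv/hv_init.c implementation from the merged branch:

```c
/* Sketch: acknowledge the reenlightenment by clearing the emulation
 * in-progress bit, then pick up the new host TSC frequency. */
void hyperv_stop_tsc_emulation(void)
{
	u64 freq;
	struct hv_tsc_emulation_status emu_status;

	rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
	emu_status.inprogress = 0;
	wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);

	rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
	tsc_khz = div64_u64(freq, 1000);
}
```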
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                     void *data)
 {
@@ -6180,9 +6261,9 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
        update_pvclock_gtod(tk);
 
        /* disable master clock if host does not trust, or does not
-        * use, TSC clocksource
+        * use, a TSC-based clocksource.
         */
-       if (gtod->clock.vclock_mode != VCLOCK_TSC &&
+       if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
            atomic_read(&kvm_guest_has_master_clock) != 0)
                queue_work(system_long_wq, &pvclock_gtod_work);
 
@@ -6244,6 +6325,9 @@ int kvm_arch_init(void *opaque)
        kvm_lapic_init();
 #ifdef CONFIG_X86_64
        pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
+
+       if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+               set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
 #endif
 
        return 0;
@@ -6256,6 +6340,10 @@ out:
 
 void kvm_arch_exit(void)
 {
+#ifdef CONFIG_X86_64
+       if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
+               clear_hv_tscchange_cb();
+#endif
        kvm_lapic_exit();
        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
@@ -7579,6 +7667,29 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
 
+static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+       if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
+               /*
+                * When EFER.LME and CR0.PG are set, the processor is in
+                * 64-bit mode (though maybe in a 32-bit code segment).
+                * CR4.PAE and EFER.LMA must be set.
+                */
+               if (!(sregs->cr4 & X86_CR4_PAE)
+                   || !(sregs->efer & EFER_LMA))
+                       return -EINVAL;
+       } else {
+               /*
+                * Not in 64-bit mode: EFER.LMA is clear and the code
+                * segment cannot be 64-bit.
+                */
+               if (sregs->efer & EFER_LMA || sregs->cs.l)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
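
Without this validation, userspace could push a vCPU into an architecturally impossible state (e.g. long mode requested without PAE), which the hardware refuses to enter. A hypothetical userspace sketch of a combination that now fails cleanly with EINVAL; the bit masks are written out here because the kernel's EFER_*/X86_CR*_ macros are not part of the UAPI headers:

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

#define EFER_LME (1ULL << 8)	/* long mode enable */
#define CR0_PG	 (1ULL << 31)	/* paging */
#define CR4_PAE	 (1ULL << 5)	/* physical address extension */

/* Hypothetical example: request long mode with paging but without
 * PAE; KVM_SET_SREGS now fails with errno == EINVAL instead of
 * leaving the vCPU in an invalid state. */
int set_bad_long_mode(int vcpu_fd)
{
	struct kvm_sregs sregs;

	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs))
		return -1;

	sregs.efer |= EFER_LME;
	sregs.cr0  |= CR0_PG;
	sregs.cr4  &= ~CR4_PAE;

	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}
```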
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -7594,6 +7705,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        (sregs->cr4 & X86_CR4_OSXSAVE))
                goto out;
 
+       if (kvm_valid_sregs(vcpu, sregs))
+               goto out;
+
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
@@ -7844,7 +7958,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 {
        struct kvm_vcpu *vcpu;
 
-       if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
+       if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
                printk_once(KERN_WARNING
                "kvm: SMP vm created on host with unstable TSC; "
                "guest TSC will not be reliable\n");
@@ -7998,7 +8112,7 @@ int kvm_arch_hardware_enable(void)
                return ret;
 
        local_tsc = rdtsc();
-       stable = !check_tsc_unstable();
+       stable = !kvm_check_tsc_unstable();
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!stable && vcpu->cpu == smp_processor_id())