Merge branch 'kvm-ppc-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus...
[linux-2.6-block.git] / virt / kvm / kvm_main.c
index 4e19bc812c29109bc5d978e116b509b1909cae92..f0fe9d02f6bb2c47d909cf493ad6e19778cc666c 100644 (file)
@@ -165,6 +165,24 @@ void vcpu_put(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(vcpu_put);
 
+/* TODO: merge with kvm_arch_vcpu_should_kick */
+static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
+{
+       int mode = kvm_vcpu_exiting_guest_mode(vcpu);
+
+       /*
+        * A request with KVM_REQUEST_WAIT has to wait for the VCPU to
+        * re-enable interrupts and leave READING_SHADOW_PAGE_TABLES mode,
+        * so anything but OUTSIDE_GUEST_MODE needs an IPI.
+        */
+       if (req & KVM_REQUEST_WAIT)
+               return mode != OUTSIDE_GUEST_MODE;
+
+       /*
+        * Without KVM_REQUEST_WAIT, only a VCPU currently running guest
+        * code needs a kick; everyone else will notice the request before
+        * the next guest entry.
+        */
+       return mode == IN_GUEST_MODE;
+}
+
 static void ack_flush(void *_completed)
 {
 }
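
The new helper keys its decision off a snapshot of vcpu->mode. For context (not part of this patch), these are the mode values it compares against, as declared in include/linux/kvm_host.h at the time of this series:

        enum {
                OUTSIDE_GUEST_MODE,
                IN_GUEST_MODE,
                EXITING_GUEST_MODE,
                READING_SHADOW_PAGE_TABLES,
        };

        static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
        {
                /*
                 * Atomically flip IN_GUEST_MODE to EXITING_GUEST_MODE and
                 * report the mode that was observed.
                 */
                return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
        }

So a KVM_REQUEST_WAIT request sends an IPI for every mode except OUTSIDE_GUEST_MODE (including READING_SHADOW_PAGE_TABLES), while a plain request only kicks a VCPU that was observed IN_GUEST_MODE.
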
@@ -174,6 +192,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
+       bool wait = req & KVM_REQUEST_WAIT;
        struct kvm_vcpu *vcpu;
 
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -183,17 +202,17 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
                kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;
 
-               /* Set ->requests bit before we read ->mode. */
-               smp_mb__after_atomic();
+               if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
+                       continue;
 
                if (cpus != NULL && cpu != -1 && cpu != me &&
-                     kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
+                   kvm_request_needs_ipi(vcpu, req))
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
-               smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+               smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
        else if (!cpumask_empty(cpus))
-               smp_call_function_many(cpus, ack_flush, NULL, 1);
+               smp_call_function_many(cpus, ack_flush, NULL, wait);
        else
                called = false;
        put_cpu();
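
The rewritten loop now has three tiers: post the request bit; then, unless the request carries KVM_REQUEST_NO_WAKEUP, try to wake a sleeping VCPU (a woken VCPU was not running guest code, so no IPI is needed and the loop continues); finally, collect the CPU into the IPI mask only when kvm_request_needs_ipi() says the observed mode requires it. The trailing smp_call_function_many() calls then pass "wait" instead of an unconditional 1, so only KVM_REQUEST_WAIT requests block until the IPIs are acknowledged. As a sketch of how the flag bits are meant to be combined (the define below matches include/linux/kvm_host.h as of this series, shown for illustration):

        /*
         * A remote TLB flush must not race with a VCPU still reading shadow
         * page tables (WAIT), but need not wake a sleeper, which will flush
         * via the pending request on its next guest entry (NO_WAKEUP):
         */
        #define KVM_REQ_TLB_FLUSH       (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

kvm_flush_remote_tlbs() passes exactly this request to kvm_make_all_cpus_request().
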
@@ -504,7 +523,7 @@ static struct kvm_memslots *kvm_alloc_memslots(void)
        int i;
        struct kvm_memslots *slots;
 
-       slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!slots)
                return NULL;
 
@@ -689,18 +708,6 @@ out_err_no_disable:
        return ERR_PTR(r);
 }
 
-/*
- * Avoid using vmalloc for a small buffer.
- * Should not be used when the size is statically known.
- */
-void *kvm_kvzalloc(unsigned long size)
-{
-       if (size > PAGE_SIZE)
-               return vzalloc(size);
-       else
-               return kzalloc(size, GFP_KERNEL);
-}
-
 static void kvm_destroy_devices(struct kvm *kvm)
 {
        struct kvm_device *dev, *tmp;
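
The removed helper is not lost functionality: mm now exports kvzalloc(), which makes essentially the same small-buffer decision (kmalloc where reasonable, vmalloc as fallback), so KVM's private copy can go. The conversion throughout this patch is mechanical:

        /* old */
        p = kvm_kvzalloc(size);
        /* new */
        p = kvzalloc(size, GFP_KERNEL);

Buffers from either variant are released with kvfree(), so the free paths need no change.
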
@@ -782,7 +789,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 
-       memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
+       memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;
 
@@ -1008,7 +1015,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                        goto out_free;
        }
 
-       slots = kvm_kvzalloc(sizeof(struct kvm_memslots));
+       slots = kvzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!slots)
                goto out_free;
        memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots));
@@ -1956,18 +1963,18 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
        return 0;
 }
 
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+       struct kvm_memslots *slots = kvm_memslots(kvm);
        return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
 
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
-                                      void *data, int offset, unsigned long len)
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                          void *data, int offset, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+       struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;
        gpa_t gpa = ghc->gpa + offset;
 
@@ -1977,7 +1984,7 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
                __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
        if (unlikely(!ghc->memslot))
-               return kvm_vcpu_write_guest(vcpu, gpa, data, len);
+               return kvm_write_guest(kvm, gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;
@@ -1989,19 +1996,19 @@ int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_offset_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
 
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
-                              void *data, unsigned long len)
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                          void *data, unsigned long len)
 {
-       return kvm_vcpu_write_guest_offset_cached(vcpu, ghc, data, 0, len);
+       return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
 
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
-                              void *data, unsigned long len)
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+                          void *data, unsigned long len)
 {
-       struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
+       struct kvm_memslots *slots = kvm_memslots(kvm);
        int r;
 
        BUG_ON(len > ghc->len);
@@ -2010,7 +2017,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
                __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
 
        if (unlikely(!ghc->memslot))
-               return kvm_vcpu_read_guest(vcpu, ghc->gpa, data, len);
+               return kvm_read_guest(kvm, ghc->gpa, data, len);
 
        if (kvm_is_error_hva(ghc->hva))
                return -EFAULT;
@@ -2021,7 +2028,7 @@ int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *g
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_cached);
+EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
 
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
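
The cached gfn->hva accessors drop their kvm_vcpu_* spelling and go back to taking struct kvm, resolving the VM-wide kvm_memslots() instead of the per-VCPU kvm_vcpu_memslots(); the slow-path fallbacks change to kvm_write_guest()/kvm_read_guest() accordingly. A hedged usage sketch of the resulting API follows; "gpa" and "val" are hypothetical, only the kvm_* calls are from these hunks:

        struct gfn_to_hva_cache ghc;
        u32 val = 0;

        if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
                return -EFAULT; /* gpa not backed by a memslot */

        /* Subsequent accesses hit the cached hva and skip the lookup: */
        if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
                return -EFAULT;
        if (kvm_read_guest_cached(kvm, &ghc, &val, sizeof(val)))
                return -EFAULT;
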
@@ -2195,8 +2202,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);
 
-#ifndef CONFIG_S390
-void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
 {
        struct swait_queue_head *wqp;
 
@@ -2204,11 +2210,14 @@ void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
        if (swait_active(wqp)) {
                swake_up(wqp);
                ++vcpu->stat.halt_wakeup;
+               return true;
        }
 
+       return false;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
 
+#ifndef CONFIG_S390
 /*
  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
  */
@@ -2217,7 +2226,9 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
        int me;
        int cpu = vcpu->cpu;
 
-       kvm_vcpu_wake_up(vcpu);
+       if (kvm_vcpu_wake_up(vcpu))
+               return;
+
        me = get_cpu();
        if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                if (kvm_arch_vcpu_should_kick(vcpu))
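
kvm_vcpu_wake_up() now tells its caller whether it actually woke a sleeper, and it moves out of the #ifndef CONFIG_S390 block so kvm_make_all_cpus_request() can use it on every architecture (only kvm_vcpu_kick() stays s390-excluded). kvm_vcpu_kick() exploits the new return value: a VCPU that was blocked in kvm_vcpu_block() cannot have been in guest mode, so a successful wake-up makes the reschedule IPI unnecessary. Condensed, the resulting kick logic reads as follows (the tail lines are the pre-existing code beyond this hunk's context):

        void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
        {
                int me;
                int cpu = vcpu->cpu;

                if (kvm_vcpu_wake_up(vcpu))
                        return;         /* it was sleeping: nothing to IPI */

                me = get_cpu();
                if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
                        if (kvm_arch_vcpu_should_kick(vcpu))
                                smp_send_reschedule(cpu);
                put_cpu();
        }
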
@@ -3050,6 +3061,8 @@ static long kvm_vm_ioctl(struct file *filp,
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
+               if (!kvm_arch_can_set_irq_routing(kvm))
+                       goto out;
                if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
@@ -3065,11 +3078,8 @@ static long kvm_vm_ioctl(struct file *filp,
                                           routing.nr * sizeof(*entries)))
                                goto out_free_irq_routing;
                }
-               /* avoid races with KVM_CREATE_IRQCHIP on x86 */
-               mutex_lock(&kvm->lock);
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
-               mutex_unlock(&kvm->lock);
 out_free_irq_routing:
                vfree(entries);
                break;
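
Two related changes to KVM_SET_GSI_ROUTING here: the ioctl now fails early with -EINVAL when the architecture is not yet ready to accept routing (the new kvm_arch_can_set_irq_routing() hook), and the kvm->lock critical section around kvm_set_irq_routing() is dropped. With routing refused until an irqchip exists, the KVM_CREATE_IRQCHIP race the deleted comment worried about no longer needs the lock, and kvm_set_irq_routing(), which can block synchronizing SRCU, no longer runs with the lock held. One plausible shape for the x86 hook, shown as an illustration rather than quoted from this patch:

        bool kvm_arch_can_set_irq_routing(struct kvm *kvm)
        {
                /* Refuse routing updates until an in-kernel irqchip exists. */
                return irqchip_in_kernel(kvm);
        }
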
@@ -3701,7 +3711,7 @@ static const struct file_operations vm_stat_get_per_vm_fops = {
        .release = kvm_debugfs_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
-       .llseek  = generic_file_llseek,
+       .llseek  = no_llseek,
 };
 
 static int vcpu_stat_get_per_vm(void *data, u64 *val)
@@ -3746,7 +3756,7 @@ static const struct file_operations vcpu_stat_get_per_vm_fops = {
        .release = kvm_debugfs_release,
        .read    = simple_attr_read,
        .write   = simple_attr_write,
-       .llseek  = generic_file_llseek,
+       .llseek  = no_llseek,
 };
 
 static const struct file_operations *stat_fops_per_vm[] = {