KVM: MMU: Make gfn_to_page() always safe
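
gfn_to_page() used to return NULL when a gfn had no memslot, so callers had to
check for NULL and then open-code a kmap_atomic()/memcpy()/kunmap_atomic()
sequence.  As the diff below shows, gfn_to_page() now returns a dedicated
bad_page instead of NULL, is_error_page() tests for it, and new
kvm_read_guest()/kvm_write_guest()/kvm_clear_guest() helpers (plus their
single-page variants) wrap the map-copy-unmap pattern; load_pdptrs(),
emulator_read_std() and emulator_write_phys() are converted to use them.

A minimal sketch of the new calling convention, based on the helpers added
below (kvm, gfn, gpa, buf and len are assumed to come from the caller; the
error handling is illustrative, not lifted from any particular call site):

	struct page *page = gfn_to_page(kvm, gfn);
	if (is_error_page(page))	/* never NULL any more */
		return -EFAULT;

	/* or avoid touching the page directly at all: */
	if (kvm_read_guest(kvm, gpa, buf, len) < 0)
		return -EFAULT;
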
[linux-block.git] / drivers/kvm/kvm_main.c
index 8da13a462e3ce1867b0e84ddf952c8df755e7536..47000be25479f4030245ceea5b08d8a51765db0f 100644
@@ -16,6 +16,7 @@
  */
 
 #include "kvm.h"
+#include "x86.h"
 #include "x86_emulate.h"
 #include "segment_descriptor.h"
 #include "irq.h"
@@ -38,6 +39,9 @@
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 #include <linux/anon_inodes.h>
+#include <linux/profile.h>
+#include <linux/kvm_para.h>
+#include <linux/pagemap.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -53,7 +57,7 @@ static LIST_HEAD(vm_list);
 
 static cpumask_t cpus_hardware_enabled;
 
-struct kvm_arch_ops *kvm_arch_ops;
+struct kvm_x86_ops *kvm_x86_ops;
 struct kmem_cache *kvm_vcpu_cache;
 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
@@ -86,8 +90,6 @@ static struct kvm_stats_debugfs_item {
 
 static struct dentry *debugfs_dir;
 
-#define MAX_IO_MSRS 256
-
 #define CR0_RESERVED_BITS                                              \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
@@ -102,7 +104,7 @@ static struct dentry *debugfs_dir;
 #define EFER_RESERVED_BITS 0xfffffffffffff2fe
 
 #ifdef CONFIG_X86_64
-// LDT or TSS descriptor in the GDT. 16 bytes.
+/* LDT or TSS descriptor in the GDT. 16 bytes. */
 struct segment_descriptor_64 {
        struct segment_descriptor s;
        u32 base_higher;
@@ -119,27 +121,27 @@ unsigned long segment_base(u16 selector)
        struct descriptor_table gdt;
        struct segment_descriptor *d;
        unsigned long table_base;
-       typedef unsigned long ul;
        unsigned long v;
 
        if (selector == 0)
                return 0;
 
-       asm ("sgdt %0" : "=m"(gdt));
+       asm("sgdt %0" : "=m"(gdt));
        table_base = gdt.base;
 
        if (selector & 4) {           /* from ldt */
                u16 ldt_selector;
 
-               asm ("sldt %0" : "=g"(ldt_selector));
+               asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
        d = (struct segment_descriptor *)(table_base + (selector & ~7));
-       v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
+       v = d->base_low | ((unsigned long)d->base_mid << 16) |
+               ((unsigned long)d->base_high << 24);
 #ifdef CONFIG_X86_64
-       if (d->system == 0
-           && (d->type == 2 || d->type == 9 || d->type == 11))
-               v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
+       if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+               v |= ((unsigned long) \
+                     ((struct segment_descriptor_64 *)d)->base_higher) << 32;
 #endif
        return v;
 }
@@ -175,21 +177,21 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
-static void vcpu_load(struct kvm_vcpu *vcpu)
+void vcpu_load(struct kvm_vcpu *vcpu)
 {
        int cpu;
 
        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
-       kvm_arch_ops->vcpu_load(vcpu, cpu);
+       kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
 }
 
-static void vcpu_put(struct kvm_vcpu *vcpu)
+void vcpu_put(struct kvm_vcpu *vcpu)
 {
        preempt_disable();
-       kvm_arch_ops->vcpu_put(vcpu);
+       kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
@@ -197,46 +199,26 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-       atomic_t *completed = _completed;
-
-       atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-       int i, cpu, needed;
+       int i, cpu;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;
-       atomic_t completed;
 
-       atomic_set(&completed, 0);
        cpus_clear(cpus);
-       needed = 0;
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
-               if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
+               if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != raw_smp_processor_id())
-                       if (!cpu_isset(cpu, cpus)) {
-                               cpu_set(cpu, cpus);
-                               ++needed;
-                       }
-       }
-
-       /*
-        * We really want smp_call_function_mask() here.  But that's not
-        * available, so ipi all cpus in parallel and wait for them
-        * to complete.
-        */
-       for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-               smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-       while (atomic_read(&completed) != needed) {
-               cpu_relax();
-               barrier();
+                       cpu_set(cpu, cpus);
        }
+       smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
@@ -273,23 +255,29 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        if (r < 0)
                goto fail_free_pio_data;
 
+       if (irqchip_in_kernel(kvm)) {
+               r = kvm_create_lapic(vcpu);
+               if (r < 0)
+                       goto fail_mmu_destroy;
+       }
+
        return 0;
 
+fail_mmu_destroy:
+       kvm_mmu_destroy(vcpu);
 fail_free_pio_data:
        free_page((unsigned long)vcpu->pio_data);
 fail_free_run:
        free_page((unsigned long)vcpu->run);
 fail:
-       return -ENOMEM;
+       return r;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
 
 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+       kvm_free_lapic(vcpu);
        kvm_mmu_destroy(vcpu);
-       if (vcpu->apic)
-               hrtimer_cancel(&vcpu->apic->timer.dev);
-       kvm_free_apic(vcpu->apic);
        free_page((unsigned long)vcpu->pio_data);
        free_page((unsigned long)vcpu->run);
 }
@@ -312,21 +300,44 @@ static struct kvm *kvm_create_vm(void)
        return kvm;
 }
 
+static void kvm_free_userspace_physmem(struct kvm_memory_slot *free)
+{
+       int i;
+
+       for (i = 0; i < free->npages; ++i) {
+               if (free->phys_mem[i]) {
+                       if (!PageReserved(free->phys_mem[i]))
+                               SetPageDirty(free->phys_mem[i]);
+                       page_cache_release(free->phys_mem[i]);
+               }
+       }
+}
+
+static void kvm_free_kernel_physmem(struct kvm_memory_slot *free)
+{
+       int i;
+
+       for (i = 0; i < free->npages; ++i)
+               if (free->phys_mem[i])
+                       __free_page(free->phys_mem[i]);
+}
+
 /*
  * Free any memory in @free but not in @dont.
  */
 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
 {
-       int i;
-
        if (!dont || free->phys_mem != dont->phys_mem)
                if (free->phys_mem) {
-                       for (i = 0; i < free->npages; ++i)
-                               if (free->phys_mem[i])
-                                       __free_page(free->phys_mem[i]);
+                       if (free->user_alloc)
+                               kvm_free_userspace_physmem(free);
+                       else
+                               kvm_free_kernel_physmem(free);
                        vfree(free->phys_mem);
                }
+       if (!dont || free->rmap != dont->rmap)
+               vfree(free->rmap);
 
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);
@@ -374,7 +385,7 @@ static void kvm_free_vcpus(struct kvm *kvm)
                        kvm_unload_vcpu_mmu(kvm->vcpus[i]);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
-                       kvm_arch_ops->vcpu_free(kvm->vcpus[i]);
+                       kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
@@ -405,7 +416,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 
 static void inject_gp(struct kvm_vcpu *vcpu)
 {
-       kvm_arch_ops->inject_gp(vcpu, 0);
+       kvm_x86_ops->inject_gp(vcpu, 0);
 }
 
 /*
@@ -416,22 +427,16 @@ static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
-       u64 *pdpt;
        int ret;
-       struct page *page;
        u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
 
        mutex_lock(&vcpu->kvm->lock);
-       page = gfn_to_page(vcpu->kvm, pdpt_gfn);
-       if (!page) {
+       ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
+                                 offset * sizeof(u64), sizeof(pdpte));
+       if (ret < 0) {
                ret = 0;
                goto out;
        }
-
-       pdpt = kmap_atomic(page, KM_USER0);
-       memcpy(pdpte, pdpt+offset, sizeof(pdpte));
-       kunmap_atomic(pdpt, KM_USER0);
-
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
                        ret = 0;
@@ -480,7 +485,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                                inject_gp(vcpu);
                                return;
                        }
-                       kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+                       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
                                printk(KERN_DEBUG "set_cr0: #GP, start paging "
                                       "in long mode while CS.L == 1\n");
@@ -499,7 +504,7 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        }
 
-       kvm_arch_ops->set_cr0(vcpu, cr0);
+       kvm_x86_ops->set_cr0(vcpu, cr0);
        vcpu->cr0 = cr0;
 
        mutex_lock(&vcpu->kvm->lock);
@@ -542,7 +547,7 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                inject_gp(vcpu);
                return;
        }
-       kvm_arch_ops->set_cr4(vcpu, cr4);
+       kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->cr4 = cr4;
        mutex_lock(&vcpu->kvm->lock);
        kvm_mmu_reset_context(vcpu);
@@ -572,14 +577,11 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                                inject_gp(vcpu);
                                return;
                        }
-               } else {
-                       if (cr3 & CR3_NONPAE_RESERVED_BITS) {
-                               printk(KERN_DEBUG
-                                      "set_cr3: #GP, reserved bits\n");
-                               inject_gp(vcpu);
-                               return;
-                       }
                }
+               /*
+                * We don't check reserved bits in nonpae mode, because
+                * this isn't enforced, and VMware depends on this.
+                */
        }
 
        mutex_lock(&vcpu->kvm->lock);
@@ -671,7 +673,9 @@ EXPORT_SYMBOL_GPL(fx_init);
  * Discontiguous memory is allowed, mostly for framebuffers.
  */
 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-                                         struct kvm_memory_region *mem)
+                                         struct
+                                         kvm_userspace_memory_region *mem,
+                                         int user_alloc)
 {
        int r;
        gfn_t base_gfn;
@@ -740,13 +744,34 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                if (!new.phys_mem)
                        goto out_unlock;
 
+               new.rmap = vmalloc(npages * sizeof(struct page *));
+
+               if (!new.rmap)
+                       goto out_unlock;
+
                memset(new.phys_mem, 0, npages * sizeof(struct page *));
-               for (i = 0; i < npages; ++i) {
-                       new.phys_mem[i] = alloc_page(GFP_HIGHUSER
-                                                    | __GFP_ZERO);
-                       if (!new.phys_mem[i])
+               memset(new.rmap, 0, npages * sizeof(*new.rmap));
+               if (user_alloc) {
+                       unsigned long pages_num;
+
+                       new.user_alloc = 1;
+                       down_read(&current->mm->mmap_sem);
+
+                       pages_num = get_user_pages(current, current->mm,
+                                                  mem->userspace_addr,
+                                                  npages, 1, 1, new.phys_mem,
+                                                  NULL);
+
+                       up_read(&current->mm->mmap_sem);
+                       if (pages_num != npages)
                                goto out_unlock;
-                       set_page_private(new.phys_mem[i],0);
+               } else {
+                       for (i = 0; i < npages; ++i) {
+                               new.phys_mem[i] = alloc_page(GFP_HIGHUSER
+                                                            | __GFP_ZERO);
+                               if (!new.phys_mem[i])
+                                       goto out_unlock;
+                       }
                }
        }
 
@@ -763,6 +788,24 @@ static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;
 
+       if (!kvm->n_requested_mmu_pages) {
+               unsigned int n_pages;
+
+               if (npages) {
+                       n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
+                       kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
+                                                n_pages);
+               } else {
+                       unsigned int nr_mmu_pages;
+
+                       n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
+                       nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
+                       nr_mmu_pages = max(nr_mmu_pages,
+                                       (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
+                       kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+               }
+       }
+
        *memslot = new;
 
        kvm_mmu_slot_remove_write_access(kvm, mem->slot);
@@ -780,6 +823,26 @@ out:
        return r;
 }
 
+static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
+                                         u32 kvm_nr_mmu_pages)
+{
+       if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
+               return -EINVAL;
+
+       mutex_lock(&kvm->lock);
+
+       kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
+       kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
+
+       mutex_unlock(&kvm->lock);
+       return 0;
+}
+
+static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
+{
+       return kvm->n_alloc_mmu_pages;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -880,17 +943,17 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               memcpy (&chip->chip.pic,
+               memcpy(&chip->chip.pic,
                        &pic_irqchip(kvm)->pics[0],
                        sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               memcpy (&chip->chip.pic,
+               memcpy(&chip->chip.pic,
                        &pic_irqchip(kvm)->pics[1],
                        sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
-               memcpy (&chip->chip.ioapic,
+               memcpy(&chip->chip.ioapic,
                        ioapic_irqchip(kvm),
                        sizeof(struct kvm_ioapic_state));
                break;
@@ -908,17 +971,17 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               memcpy (&pic_irqchip(kvm)->pics[0],
+               memcpy(&pic_irqchip(kvm)->pics[0],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               memcpy (&pic_irqchip(kvm)->pics[1],
+               memcpy(&pic_irqchip(kvm)->pics[1],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
                break;
        case KVM_IRQCHIP_IOAPIC:
-               memcpy (ioapic_irqchip(kvm),
+               memcpy(ioapic_irqchip(kvm),
                        &chip->chip.ioapic,
                        sizeof(struct kvm_ioapic_state));
                break;
@@ -930,7 +993,13 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        return r;
 }
 
-static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+int is_error_page(struct page *page)
+{
+       return page == bad_page;
+}
+EXPORT_SYMBOL_GPL(is_error_page);
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 {
        int i;
        struct kvm_mem_alias *alias;
@@ -971,11 +1040,132 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
        gfn = unalias_gfn(kvm, gfn);
        slot = __gfn_to_memslot(kvm, gfn);
        if (!slot)
-               return NULL;
+               return bad_page;
        return slot->phys_mem[gfn - slot->base_gfn];
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
+static int next_segment(unsigned long len, int offset)
+{
+       if (len > PAGE_SIZE - offset)
+               return PAGE_SIZE - offset;
+       else
+               return len;
+}
+
+int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+                       int len)
+{
+       void *page_virt;
+       struct page *page;
+
+       page = gfn_to_page(kvm, gfn);
+       if (is_error_page(page))
+               return -EFAULT;
+       page_virt = kmap_atomic(page, KM_USER0);
+
+       memcpy(data, page_virt + offset, len);
+
+       kunmap_atomic(page_virt, KM_USER0);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+
+int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
+{
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int seg;
+       int offset = offset_in_page(gpa);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               ++gfn;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_read_guest);
+
+int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
+                        int offset, int len)
+{
+       void *page_virt;
+       struct page *page;
+
+       page = gfn_to_page(kvm, gfn);
+       if (is_error_page(page))
+               return -EFAULT;
+       page_virt = kmap_atomic(page, KM_USER0);
+
+       memcpy(page_virt + offset, data, len);
+
+       kunmap_atomic(page_virt, KM_USER0);
+       mark_page_dirty(kvm, gfn);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+
+int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
+                   unsigned long len)
+{
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int seg;
+       int offset = offset_in_page(gpa);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               data += seg;
+               ++gfn;
+       }
+       return 0;
+}
+
+int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
+{
+       void *page_virt;
+       struct page *page;
+
+       page = gfn_to_page(kvm, gfn);
+       if (is_error_page(page))
+               return -EFAULT;
+       page_virt = kmap_atomic(page, KM_USER0);
+
+       memset(page_virt + offset, 0, len);
+
+       kunmap_atomic(page_virt, KM_USER0);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
+
+int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
+{
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+       int seg;
+       int offset = offset_in_page(gpa);
+       int ret;
+
+       while ((seg = next_segment(len, offset)) != 0) {
+               ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
+               if (ret < 0)
+                       return ret;
+               offset = 0;
+               len -= seg;
+               ++gfn;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_clear_guest);
+
 /* WARNING: Does not work on aliased pages. */
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
@@ -1002,21 +1192,13 @@ int emulator_read_std(unsigned long addr,
                gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
-               unsigned long pfn;
-               struct page *page;
-               void *page_virt;
+               int ret;
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               pfn = gpa >> PAGE_SHIFT;
-               page = gfn_to_page(vcpu->kvm, pfn);
-               if (!page)
+               ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
+               if (ret < 0)
                        return X86EMUL_UNHANDLEABLE;
-               page_virt = kmap_atomic(page, KM_USER0);
-
-               memcpy(data, page_virt + offset, tocopy);
-
-               kunmap_atomic(page_virt, KM_USER0);
 
                bytes -= tocopy;
                data += tocopy;
@@ -1109,19 +1291,12 @@ static int emulator_read_emulated(unsigned long addr,
 static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
                               const void *val, int bytes)
 {
-       struct page *page;
-       void *virt;
+       int ret;
 
-       if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
-               return 0;
-       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (!page)
+       ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
+       if (ret < 0)
                return 0;
-       mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
-       virt = kmap_atomic(page, KM_USER0);
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-       memcpy(virt + offset_in_page(gpa), val, bytes);
-       kunmap_atomic(virt, KM_USER0);
        return 1;
 }
 
@@ -1134,7 +1309,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
        gpa_t                 gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
 
        if (gpa == UNMAPPED_GVA) {
-               kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
+               kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -1197,7 +1372,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
-       return kvm_arch_ops->get_segment_base(vcpu, seg);
+       return kvm_x86_ops->get_segment_base(vcpu, seg);
 }
 
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
@@ -1207,18 +1382,17 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-       vcpu->cr0 &= ~X86_CR0_TS;
-       kvm_arch_ops->set_cr0(vcpu, vcpu->cr0);
+       kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
        return X86EMUL_CONTINUE;
 }
 
-int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
+int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
 
        switch (dr) {
        case 0 ... 3:
-               *dest = kvm_arch_ops->get_dr(vcpu, dr);
+               *dest = kvm_x86_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
                pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
@@ -1231,7 +1405,7 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
        int exception;
 
-       kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
+       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
        if (exception) {
                /* FIXME: better handling */
                return X86EMUL_UNHANDLEABLE;
@@ -1239,25 +1413,25 @@ int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
        return X86EMUL_CONTINUE;
 }
 
-static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
+void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 {
        static int reported;
        u8 opcodes[4];
-       unsigned long rip = ctxt->vcpu->rip;
+       unsigned long rip = vcpu->rip;
        unsigned long rip_linear;
 
-       rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);
+       rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
        if (reported)
                return;
 
-       emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt->vcpu);
+       emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
 
-       printk(KERN_ERR "emulation failed but !mmio_needed?"
-              " rip %lx %02x %02x %02x %02x\n",
-              rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
+       printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
+              context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
        reported = 1;
 }
+EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
@@ -1270,43 +1444,61 @@ struct x86_emulate_ops emulate_ops = {
 int emulate_instruction(struct kvm_vcpu *vcpu,
                        struct kvm_run *run,
                        unsigned long cr2,
-                       u16 error_code)
+                       u16 error_code,
+                       int no_decode)
 {
-       struct x86_emulate_ctxt emulate_ctxt;
        int r;
-       int cs_db, cs_l;
 
        vcpu->mmio_fault_cr2 = cr2;
-       kvm_arch_ops->cache_regs(vcpu);
-
-       kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-
-       emulate_ctxt.vcpu = vcpu;
-       emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
-       emulate_ctxt.cr2 = cr2;
-       emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
-               ? X86EMUL_MODE_REAL : cs_l
-               ? X86EMUL_MODE_PROT64 : cs_db
-               ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-
-       if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
-               emulate_ctxt.cs_base = 0;
-               emulate_ctxt.ds_base = 0;
-               emulate_ctxt.es_base = 0;
-               emulate_ctxt.ss_base = 0;
-       } else {
-               emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
-               emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
-               emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
-               emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
-       }
-
-       emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
-       emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
+       kvm_x86_ops->cache_regs(vcpu);
 
        vcpu->mmio_is_write = 0;
        vcpu->pio.string = 0;
-       r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
+
+       if (!no_decode) {
+               int cs_db, cs_l;
+               kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+
+               vcpu->emulate_ctxt.vcpu = vcpu;
+               vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+               vcpu->emulate_ctxt.cr2 = cr2;
+               vcpu->emulate_ctxt.mode =
+                       (vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
+                       ? X86EMUL_MODE_REAL : cs_l
+                       ? X86EMUL_MODE_PROT64 : cs_db
+                       ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+
+               if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
+                       vcpu->emulate_ctxt.cs_base = 0;
+                       vcpu->emulate_ctxt.ds_base = 0;
+                       vcpu->emulate_ctxt.es_base = 0;
+                       vcpu->emulate_ctxt.ss_base = 0;
+               } else {
+                       vcpu->emulate_ctxt.cs_base =
+                                       get_segment_base(vcpu, VCPU_SREG_CS);
+                       vcpu->emulate_ctxt.ds_base =
+                                       get_segment_base(vcpu, VCPU_SREG_DS);
+                       vcpu->emulate_ctxt.es_base =
+                                       get_segment_base(vcpu, VCPU_SREG_ES);
+                       vcpu->emulate_ctxt.ss_base =
+                                       get_segment_base(vcpu, VCPU_SREG_SS);
+               }
+
+               vcpu->emulate_ctxt.gs_base =
+                                       get_segment_base(vcpu, VCPU_SREG_GS);
+               vcpu->emulate_ctxt.fs_base =
+                                       get_segment_base(vcpu, VCPU_SREG_FS);
+
+               r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
+               if (r)  {
+                       if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
+                               return EMULATE_DONE;
+                       return EMULATE_FAIL;
+               }
+       }
+
+       r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);
+
        if (vcpu->pio.string)
                return EMULATE_DO_MMIO;
 
@@ -1322,14 +1514,14 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
                        return EMULATE_DONE;
                if (!vcpu->mmio_needed) {
-                       report_emulation_failure(&emulate_ctxt);
+                       kvm_report_emulation_failure(vcpu, "mmio");
                        return EMULATE_FAIL;
                }
                return EMULATE_DO_MMIO;
        }
 
-       kvm_arch_ops->decache_regs(vcpu);
-       kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);
+       kvm_x86_ops->decache_regs(vcpu);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);
 
        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
@@ -1382,51 +1574,61 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
-int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
-       unsigned long nr, a0, a1, a2, a3, a4, a5, ret;
+       unsigned long nr, a0, a1, a2, a3, ret;
 
-       kvm_arch_ops->cache_regs(vcpu);
-       ret = -KVM_EINVAL;
-#ifdef CONFIG_X86_64
-       if (is_long_mode(vcpu)) {
-               nr = vcpu->regs[VCPU_REGS_RAX];
-               a0 = vcpu->regs[VCPU_REGS_RDI];
-               a1 = vcpu->regs[VCPU_REGS_RSI];
-               a2 = vcpu->regs[VCPU_REGS_RDX];
-               a3 = vcpu->regs[VCPU_REGS_RCX];
-               a4 = vcpu->regs[VCPU_REGS_R8];
-               a5 = vcpu->regs[VCPU_REGS_R9];
-       } else
-#endif
-       {
-               nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
-               a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
-               a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
-               a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
-               a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
-               a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
-               a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
+       kvm_x86_ops->cache_regs(vcpu);
+
+       nr = vcpu->regs[VCPU_REGS_RAX];
+       a0 = vcpu->regs[VCPU_REGS_RBX];
+       a1 = vcpu->regs[VCPU_REGS_RCX];
+       a2 = vcpu->regs[VCPU_REGS_RDX];
+       a3 = vcpu->regs[VCPU_REGS_RSI];
+
+       if (!is_long_mode(vcpu)) {
+               nr &= 0xFFFFFFFF;
+               a0 &= 0xFFFFFFFF;
+               a1 &= 0xFFFFFFFF;
+               a2 &= 0xFFFFFFFF;
+               a3 &= 0xFFFFFFFF;
        }
+
        switch (nr) {
        default:
-               run->hypercall.nr = nr;
-               run->hypercall.args[0] = a0;
-               run->hypercall.args[1] = a1;
-               run->hypercall.args[2] = a2;
-               run->hypercall.args[3] = a3;
-               run->hypercall.args[4] = a4;
-               run->hypercall.args[5] = a5;
-               run->hypercall.ret = ret;
-               run->hypercall.longmode = is_long_mode(vcpu);
-               kvm_arch_ops->decache_regs(vcpu);
-               return 0;
+               ret = -KVM_ENOSYS;
+               break;
        }
        vcpu->regs[VCPU_REGS_RAX] = ret;
-       kvm_arch_ops->decache_regs(vcpu);
-       return 1;
+       kvm_x86_ops->decache_regs(vcpu);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+
+int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
+{
+       char instruction[3];
+       int ret = 0;
+
+       mutex_lock(&vcpu->kvm->lock);
+
+       /*
+        * Blow out the MMU to ensure that no other VCPU has an active mapping
+        * to ensure that the updated hypercall appears atomically across all
+        * VCPUs.
+        */
+       kvm_mmu_zap_all(vcpu->kvm);
+
+       kvm_x86_ops->cache_regs(vcpu);
+       kvm_x86_ops->patch_hypercall(vcpu, instruction);
+       if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
+           != X86EMUL_CONTINUE)
+               ret = -EFAULT;
+
+       mutex_unlock(&vcpu->kvm->lock);
+
+       return ret;
 }
-EXPORT_SYMBOL_GPL(kvm_hypercall);
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 {
@@ -1437,26 +1639,26 @@ void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
        struct descriptor_table dt = { limit, base };
 
-       kvm_arch_ops->set_gdt(vcpu, &dt);
+       kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
        struct descriptor_table dt = { limit, base };
 
-       kvm_arch_ops->set_idt(vcpu, &dt);
+       kvm_x86_ops->set_idt(vcpu, &dt);
 }
 
 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
 {
        lmsw(vcpu, msw);
-       *rflags = kvm_arch_ops->get_rflags(vcpu);
+       *rflags = kvm_x86_ops->get_rflags(vcpu);
 }
 
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 {
-       kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        switch (cr) {
        case 0:
                return vcpu->cr0;
@@ -1478,7 +1680,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
        switch (cr) {
        case 0:
                set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
-               *rflags = kvm_arch_ops->get_rflags(vcpu);
+               *rflags = kvm_x86_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->cr2 = val;
@@ -1494,75 +1696,6 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
        }
 }
 
-/*
- * Register the para guest with the host:
- */
-static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
-{
-       struct kvm_vcpu_para_state *para_state;
-       hpa_t para_state_hpa, hypercall_hpa;
-       struct page *para_state_page;
-       unsigned char *hypercall;
-       gpa_t hypercall_gpa;
-
-       printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
-       printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);
-
-       /*
-        * Needs to be page aligned:
-        */
-       if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
-               goto err_gp;
-
-       para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
-       printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
-       if (is_error_hpa(para_state_hpa))
-               goto err_gp;
-
-       mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
-       para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
-       para_state = kmap(para_state_page);
-
-       printk(KERN_DEBUG "....  guest version: %d\n", para_state->guest_version);
-       printk(KERN_DEBUG "....           size: %d\n", para_state->size);
-
-       para_state->host_version = KVM_PARA_API_VERSION;
-       /*
-        * We cannot support guests that try to register themselves
-        * with a newer API version than the host supports:
-        */
-       if (para_state->guest_version > KVM_PARA_API_VERSION) {
-               para_state->ret = -KVM_EINVAL;
-               goto err_kunmap_skip;
-       }
-
-       hypercall_gpa = para_state->hypercall_gpa;
-       hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
-       printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
-       if (is_error_hpa(hypercall_hpa)) {
-               para_state->ret = -KVM_EINVAL;
-               goto err_kunmap_skip;
-       }
-
-       printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
-       vcpu->para_state_page = para_state_page;
-       vcpu->para_state_gpa = para_state_gpa;
-       vcpu->hypercall_gpa = hypercall_gpa;
-
-       mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
-       hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
-                               KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
-       kvm_arch_ops->patch_hypercall(vcpu, hypercall);
-       kunmap_atomic(hypercall, KM_USER1);
-
-       para_state->ret = 0;
-err_kunmap_skip:
-       kunmap(para_state_page);
-       return 0;
-err_gp:
-       return 1;
-}
-
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
@@ -1619,7 +1752,7 @@ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
  */
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
-       return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
+       return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
 }
 
 #ifdef CONFIG_X86_64
@@ -1640,7 +1773,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
                return;
        }
 
-       kvm_arch_ops->set_efer(vcpu, efer);
+       kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
        efer |= vcpu->shadow_efer & EFER_LMA;
@@ -1676,12 +1809,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        case MSR_IA32_MISC_ENABLE:
                vcpu->ia32_misc_enable_msr = data;
                break;
-       /*
-        * This is the 'probe whether the host is KVM' logic:
-        */
-       case MSR_KVM_API_MAGIC:
-               return vcpu_register_para(vcpu, data);
-
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
                return 1;
@@ -1697,7 +1824,7 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
  */
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
-       return kvm_arch_ops->set_msr(vcpu, msr_index, data);
+       return kvm_x86_ops->set_msr(vcpu, msr_index, data);
 }
 
 void kvm_resched(struct kvm_vcpu *vcpu)
@@ -1714,7 +1841,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        u32 function;
        struct kvm_cpuid_entry *e, *best;
 
-       kvm_arch_ops->cache_regs(vcpu);
+       kvm_x86_ops->cache_regs(vcpu);
        function = vcpu->regs[VCPU_REGS_RAX];
        vcpu->regs[VCPU_REGS_RAX] = 0;
        vcpu->regs[VCPU_REGS_RBX] = 0;
@@ -1740,8 +1867,8 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
                vcpu->regs[VCPU_REGS_RCX] = best->ecx;
                vcpu->regs[VCPU_REGS_RDX] = best->edx;
        }
-       kvm_arch_ops->decache_regs(vcpu);
-       kvm_arch_ops->skip_emulated_instruction(vcpu);
+       kvm_x86_ops->decache_regs(vcpu);
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -1776,7 +1903,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
        long delta;
        int r;
 
-       kvm_arch_ops->cache_regs(vcpu);
+       kvm_x86_ops->cache_regs(vcpu);
 
        if (!io->string) {
                if (io->in)
@@ -1786,7 +1913,7 @@ static int complete_pio(struct kvm_vcpu *vcpu)
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r) {
-                               kvm_arch_ops->cache_regs(vcpu);
+                               kvm_x86_ops->cache_regs(vcpu);
                                return r;
                        }
                }
@@ -1809,13 +1936,11 @@ static int complete_pio(struct kvm_vcpu *vcpu)
                        vcpu->regs[VCPU_REGS_RSI] += delta;
        }
 
-       kvm_arch_ops->decache_regs(vcpu);
+       kvm_x86_ops->decache_regs(vcpu);
 
        io->count -= io->cur_count;
        io->cur_count = 0;
 
-       if (!io->count)
-               kvm_arch_ops->skip_emulated_instruction(vcpu);
        return 0;
 }
 
@@ -1854,7 +1979,7 @@ static void pio_string_write(struct kvm_io_device *pio_dev,
        mutex_unlock(&vcpu->kvm->lock);
 }
 
-int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                  int size, unsigned port)
 {
        struct kvm_io_device *pio_dev;
@@ -1871,9 +1996,11 @@ int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->pio.guest_page_offset = 0;
        vcpu->pio.rep = 0;
 
-       kvm_arch_ops->cache_regs(vcpu);
+       kvm_x86_ops->cache_regs(vcpu);
        memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
-       kvm_arch_ops->decache_regs(vcpu);
+       kvm_x86_ops->decache_regs(vcpu);
+
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        pio_dev = vcpu_find_pio_dev(vcpu, port);
        if (pio_dev) {
@@ -1908,7 +2035,7 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->pio.rep = rep;
 
        if (!count) {
-               kvm_arch_ops->skip_emulated_instruction(vcpu);
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
        }
 
@@ -1937,6 +2064,9 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->run->io.count = now;
        vcpu->pio.cur_count = now;
 
+       if (vcpu->pio.cur_count == vcpu->pio.count)
+               kvm_x86_ops->skip_emulated_instruction(vcpu);
+
        for (i = 0; i < nr_pages; ++i) {
                mutex_lock(&vcpu->kvm->lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
@@ -1970,6 +2100,140 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
 
+/*
+ * Check if userspace requested an interrupt window, and that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
+                                         struct kvm_run *kvm_run)
+{
+       return (!vcpu->irq_summary &&
+               kvm_run->request_interrupt_window &&
+               vcpu->interrupt_window_open &&
+               (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
+}
+
+static void post_kvm_run_save(struct kvm_vcpu *vcpu,
+                             struct kvm_run *kvm_run)
+{
+       kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
+       kvm_run->cr8 = get_cr8(vcpu);
+       kvm_run->apic_base = kvm_get_apic_base(vcpu);
+       if (irqchip_in_kernel(vcpu->kvm))
+               kvm_run->ready_for_interrupt_injection = 1;
+       else
+               kvm_run->ready_for_interrupt_injection =
+                                       (vcpu->interrupt_window_open &&
+                                        vcpu->irq_summary == 0);
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r;
+
+       if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+               pr_debug("vcpu %d received sipi with vector # %x\n",
+                      vcpu->vcpu_id, vcpu->sipi_vector);
+               kvm_lapic_reset(vcpu);
+               kvm_x86_ops->vcpu_reset(vcpu);
+               vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+       }
+
+preempted:
+       if (vcpu->guest_debug.enabled)
+               kvm_x86_ops->guest_debug_pre(vcpu);
+
+again:
+       r = kvm_mmu_reload(vcpu);
+       if (unlikely(r))
+               goto out;
+
+       kvm_inject_pending_timer_irqs(vcpu);
+
+       preempt_disable();
+
+       kvm_x86_ops->prepare_guest_switch(vcpu);
+       kvm_load_guest_fpu(vcpu);
+
+       local_irq_disable();
+
+       if (signal_pending(current)) {
+               local_irq_enable();
+               preempt_enable();
+               r = -EINTR;
+               kvm_run->exit_reason = KVM_EXIT_INTR;
+               ++vcpu->stat.signal_exits;
+               goto out;
+       }
+
+       if (irqchip_in_kernel(vcpu->kvm))
+               kvm_x86_ops->inject_pending_irq(vcpu);
+       else if (!vcpu->mmio_read_completed)
+               kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
+
+       vcpu->guest_mode = 1;
+       kvm_guest_enter();
+
+       if (vcpu->requests)
+               if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+                       kvm_x86_ops->tlb_flush(vcpu);
+
+       kvm_x86_ops->run(vcpu, kvm_run);
+
+       vcpu->guest_mode = 0;
+       local_irq_enable();
+
+       ++vcpu->stat.exits;
+
+       /*
+        * We must have an instruction between local_irq_enable() and
+        * kvm_guest_exit(), so the timer interrupt isn't delayed by
+        * the interrupt shadow.  The stat.exits increment will do nicely.
+        * But we need to prevent reordering, hence this barrier():
+        */
+       barrier();
+
+       kvm_guest_exit();
+
+       preempt_enable();
+
+       /*
+        * Profile KVM exit RIPs:
+        */
+       if (unlikely(prof_on == KVM_PROFILING)) {
+               kvm_x86_ops->cache_regs(vcpu);
+               profile_hit(KVM_PROFILING, (void *)vcpu->rip);
+       }
+
+       r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
+
+       if (r > 0) {
+               if (dm_request_for_irq_injection(vcpu, kvm_run)) {
+                       r = -EINTR;
+                       kvm_run->exit_reason = KVM_EXIT_INTR;
+                       ++vcpu->stat.request_irq_exits;
+                       goto out;
+               }
+               if (!need_resched()) {
+                       ++vcpu->stat.light_exits;
+                       goto again;
+               }
+       }
+
+out:
+       if (r > 0) {
+               kvm_resched(vcpu);
+               goto preempted;
+       }
+
+       post_kvm_run_save(vcpu, kvm_run);
+
+       return r;
+}
+
+
 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
@@ -2001,7 +2265,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
                r = emulate_instruction(vcpu, kvm_run,
-                                       vcpu->mmio_fault_cr2, 0);
+                                       vcpu->mmio_fault_cr2, 0, 1);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
@@ -2012,12 +2276,12 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
-               kvm_arch_ops->cache_regs(vcpu);
+               kvm_x86_ops->cache_regs(vcpu);
                vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
-               kvm_arch_ops->decache_regs(vcpu);
+               kvm_x86_ops->decache_regs(vcpu);
        }
 
-       r = kvm_arch_ops->run(vcpu, kvm_run);
+       r = __vcpu_run(vcpu, kvm_run);
 
 out:
        if (vcpu->sigset_active)
@@ -2032,7 +2296,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 {
        vcpu_load(vcpu);
 
-       kvm_arch_ops->cache_regs(vcpu);
+       kvm_x86_ops->cache_regs(vcpu);
 
        regs->rax = vcpu->regs[VCPU_REGS_RAX];
        regs->rbx = vcpu->regs[VCPU_REGS_RBX];
@@ -2054,7 +2318,7 @@ static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
 #endif
 
        regs->rip = vcpu->rip;
-       regs->rflags = kvm_arch_ops->get_rflags(vcpu);
+       regs->rflags = kvm_x86_ops->get_rflags(vcpu);
 
        /*
         * Don't leak debug flags in case they were set for guest debugging
@@ -2092,9 +2356,9 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 #endif
 
        vcpu->rip = regs->rip;
-       kvm_arch_ops->set_rflags(vcpu, regs->rflags);
+       kvm_x86_ops->set_rflags(vcpu, regs->rflags);
 
-       kvm_arch_ops->decache_regs(vcpu);
+       kvm_x86_ops->decache_regs(vcpu);
 
        vcpu_put(vcpu);
 
@@ -2104,7 +2368,7 @@ static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
 static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_arch_ops->get_segment(vcpu, var, seg);
+       return kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
@@ -2125,14 +2389,14 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
        get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
-       kvm_arch_ops->get_idt(vcpu, &dt);
+       kvm_x86_ops->get_idt(vcpu, &dt);
        sregs->idt.limit = dt.limit;
        sregs->idt.base = dt.base;
-       kvm_arch_ops->get_gdt(vcpu, &dt);
+       kvm_x86_ops->get_gdt(vcpu, &dt);
        sregs->gdt.limit = dt.limit;
        sregs->gdt.base = dt.base;
 
-       kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        sregs->cr0 = vcpu->cr0;
        sregs->cr2 = vcpu->cr2;
        sregs->cr3 = vcpu->cr3;
@@ -2144,9 +2408,10 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        if (irqchip_in_kernel(vcpu->kvm)) {
                memset(sregs->interrupt_bitmap, 0,
                       sizeof sregs->interrupt_bitmap);
-               pending_vec = kvm_arch_ops->get_irq(vcpu);
+               pending_vec = kvm_x86_ops->get_irq(vcpu);
                if (pending_vec >= 0)
-                       set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
+                       set_bit(pending_vec,
+                               (unsigned long *)sregs->interrupt_bitmap);
        } else
                memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
                       sizeof sregs->interrupt_bitmap);
@@ -2159,7 +2424,7 @@ static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_arch_ops->set_segment(vcpu, var, seg);
+       return kvm_x86_ops->set_segment(vcpu, var, seg);
 }
 
 static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -2173,10 +2438,10 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        dt.limit = sregs->idt.limit;
        dt.base = sregs->idt.base;
-       kvm_arch_ops->set_idt(vcpu, &dt);
+       kvm_x86_ops->set_idt(vcpu, &dt);
        dt.limit = sregs->gdt.limit;
        dt.base = sregs->gdt.base;
-       kvm_arch_ops->set_gdt(vcpu, &dt);
+       kvm_x86_ops->set_gdt(vcpu, &dt);
 
        vcpu->cr2 = sregs->cr2;
        mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
@@ -2186,18 +2451,18 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
 #ifdef CONFIG_X86_64
-       kvm_arch_ops->set_efer(vcpu, sregs->efer);
+       kvm_x86_ops->set_efer(vcpu, sregs->efer);
 #endif
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
-       kvm_arch_ops->decache_cr4_guest_bits(vcpu);
+       kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 
        mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
        vcpu->cr0 = sregs->cr0;
-       kvm_arch_ops->set_cr0(vcpu, sregs->cr0);
+       kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
 
        mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
-       kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+       kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
        if (!is_long_mode(vcpu) && is_pae(vcpu))
                load_pdptrs(vcpu, vcpu->cr3);
 
@@ -2218,8 +2483,9 @@ static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                        max_bits);
                /* Only pending external irq is handled here */
                if (pending_vec < max_bits) {
-                       kvm_arch_ops->set_irq(vcpu, pending_vec);
-                       printk("Set back pending irq %d\n", pending_vec);
+                       kvm_x86_ops->set_irq(vcpu, pending_vec);
+                       pr_debug("Set back pending irq %d\n",
+                                pending_vec);
                }
        }
 
@@ -2248,123 +2514,6 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 
-/*
- * List of msr numbers which we expose to userspace through KVM_GET_MSRS
- * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
- *
- * This list is modified at module load time to reflect the
- * capabilities of the host cpu.
- */
-static u32 msrs_to_save[] = {
-       MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
-       MSR_K6_STAR,
-#ifdef CONFIG_X86_64
-       MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
-#endif
-       MSR_IA32_TIME_STAMP_COUNTER,
-};
-
-static unsigned num_msrs_to_save;
-
-static u32 emulated_msrs[] = {
-       MSR_IA32_MISC_ENABLE,
-};
-
-static __init void kvm_init_msr_list(void)
-{
-       u32 dummy[2];
-       unsigned i, j;
-
-       for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
-               if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
-                       continue;
-               if (j < i)
-                       msrs_to_save[j] = msrs_to_save[i];
-               j++;
-       }
-       num_msrs_to_save = j;
-}
-
-/*
- * Adapt set_msr() to msr_io()'s calling convention
- */
-static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
-{
-       return kvm_set_msr(vcpu, index, *data);
-}
-
-/*
- * Read or write a bunch of msrs. All parameters are kernel addresses.
- *
- * @return number of msrs set successfully.
- */
-static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
-                   struct kvm_msr_entry *entries,
-                   int (*do_msr)(struct kvm_vcpu *vcpu,
-                                 unsigned index, u64 *data))
-{
-       int i;
-
-       vcpu_load(vcpu);
-
-       for (i = 0; i < msrs->nmsrs; ++i)
-               if (do_msr(vcpu, entries[i].index, &entries[i].data))
-                       break;
-
-       vcpu_put(vcpu);
-
-       return i;
-}
-
-/*
- * Read or write a bunch of msrs. Parameters are user addresses.
- *
- * @return number of msrs set successfully.
- */
-static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
-                 int (*do_msr)(struct kvm_vcpu *vcpu,
-                               unsigned index, u64 *data),
-                 int writeback)
-{
-       struct kvm_msrs msrs;
-       struct kvm_msr_entry *entries;
-       int r, n;
-       unsigned size;
-
-       r = -EFAULT;
-       if (copy_from_user(&msrs, user_msrs, sizeof msrs))
-               goto out;
-
-       r = -E2BIG;
-       if (msrs.nmsrs >= MAX_IO_MSRS)
-               goto out;
-
-       r = -ENOMEM;
-       size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
-       entries = vmalloc(size);
-       if (!entries)
-               goto out;
-
-       r = -EFAULT;
-       if (copy_from_user(entries, user_msrs->entries, size))
-               goto out_free;
-
-       r = n = __msr_io(vcpu, &msrs, entries, do_msr);
-       if (r < 0)
-               goto out_free;
-
-       r = -EFAULT;
-       if (writeback && copy_to_user(user_msrs->entries, entries, size))
-               goto out_free;
-
-       r = n;
-
-out_free:
-       vfree(entries);
-out:
-       return r;
-}
-
 /*
  * Translate a guest virtual address to a guest physical address.
  */
@@ -2411,7 +2560,7 @@ static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
 
-       r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
+       r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
 
        vcpu_put(vcpu);
 
@@ -2493,7 +2642,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
        if (!valid_vcpu(n))
                return -EINVAL;
 
-       vcpu = kvm_arch_ops->vcpu_create(kvm, n);
+       vcpu = kvm_x86_ops->vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);
 
@@ -2534,49 +2683,7 @@ mmu_unload:
        vcpu_put(vcpu);
 
 free_vcpu:
-       kvm_arch_ops->vcpu_free(vcpu);
-       return r;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       u64 efer;
-       int i;
-       struct kvm_cpuid_entry *e, *entry;
-
-       rdmsrl(MSR_EFER, efer);
-       entry = NULL;
-       for (i = 0; i < vcpu->cpuid_nent; ++i) {
-               e = &vcpu->cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
-               entry->edx &= ~(1 << 20);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
-static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
-                                   struct kvm_cpuid *cpuid,
-                                   struct kvm_cpuid_entry __user *entries)
-{
-       int r;
-
-       r = -E2BIG;
-       if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
-               goto out;
-       r = -EFAULT;
-       if (copy_from_user(&vcpu->cpuid_entries, entries,
-                          cpuid->nent * sizeof(struct kvm_cpuid_entry)))
-               goto out;
-       vcpu->cpuid_nent = cpuid->nent;
-       cpuid_fix_nx_cap(vcpu);
-       return 0;
-
-out:
+       kvm_x86_ops->vcpu_free(vcpu);
        return r;
 }
 
@@ -2652,33 +2759,12 @@ static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        return 0;
 }
 
-static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
-                                   struct kvm_lapic_state *s)
-{
-       vcpu_load(vcpu);
-       memcpy(s->regs, vcpu->apic->regs, sizeof *s);
-       vcpu_put(vcpu);
-
-       return 0;
-}
-
-static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
-                                   struct kvm_lapic_state *s)
-{
-       vcpu_load(vcpu);
-       memcpy(vcpu->apic->regs, s->regs, sizeof *s);
-       kvm_apic_post_state_restore(vcpu);
-       vcpu_put(vcpu);
-
-       return 0;
-}
-
 static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
-       int r = -EINVAL;
+       int r;
 
        switch (ioctl) {
        case KVM_RUN:
@@ -2776,24 +2862,6 @@ static long kvm_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_MSRS:
-               r = msr_io(vcpu, argp, kvm_get_msr, 1);
-               break;
-       case KVM_SET_MSRS:
-               r = msr_io(vcpu, argp, do_set_msr, 0);
-               break;
-       case KVM_SET_CPUID: {
-               struct kvm_cpuid __user *cpuid_arg = argp;
-               struct kvm_cpuid cpuid;
-
-               r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-                       goto out;
-               r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
-               if (r)
-                       goto out;
-               break;
-       }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
@@ -2842,33 +2910,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
-       case KVM_GET_LAPIC: {
-               struct kvm_lapic_state lapic;
-
-               memset(&lapic, 0, sizeof lapic);
-               r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
-               if (r)
-                       goto out;
-               r = -EFAULT;
-               if (copy_to_user(argp, &lapic, sizeof lapic))
-                       goto out;
-               r = 0;
-               break;
-       }
-       case KVM_SET_LAPIC: {
-               struct kvm_lapic_state lapic;
-
-               r = -EFAULT;
-               if (copy_from_user(&lapic, argp, sizeof lapic))
-                       goto out;
-               r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
-               if (r)
-                       goto out;
-               r = 0;
-               break;
-       }
        default:
-               ;
+               r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
 out:
        return r;
@@ -2889,15 +2932,41 @@ static long kvm_vm_ioctl(struct file *filp,
                break;
        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;
+               struct kvm_userspace_memory_region kvm_userspace_mem;
 
                r = -EFAULT;
                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
                        goto out;
-               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
+               kvm_userspace_mem.slot = kvm_mem.slot;
+               kvm_userspace_mem.flags = kvm_mem.flags;
+               kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
+               kvm_userspace_mem.memory_size = kvm_mem.memory_size;
+               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
+               if (r)
+                       goto out;
+               break;
+       }
+       case KVM_SET_USER_MEMORY_REGION: {
+               struct kvm_userspace_memory_region kvm_userspace_mem;
+
+               r = -EFAULT;
+               if (copy_from_user(&kvm_userspace_mem, argp,
+                                               sizeof kvm_userspace_mem))
+                       goto out;
+
+               r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
                if (r)
                        goto out;
                break;
        }
+       case KVM_SET_NR_MMU_PAGES:
+               r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
+               if (r)
+                       goto out;
+               break;
+       case KVM_GET_NR_MMU_PAGES:
+               r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
+               break;
        case KVM_GET_DIRTY_LOG: {
                struct kvm_dirty_log log;
 
@@ -2930,8 +2999,7 @@ static long kvm_vm_ioctl(struct file *filp,
                                kvm->vpic = NULL;
                                goto out;
                        }
-               }
-               else
+               } else
                        goto out;
                break;
        case KVM_IRQ_LINE: {
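
(Annotation: the hunk above routes the legacy KVM_SET_MEMORY_REGION ioctl through the new kvm_userspace_memory_region path with user_alloc=0, and adds KVM_SET_USER_MEMORY_REGION with user_alloc=1. A minimal userspace sketch of driving the new ioctl follows; the file-descriptor handling, the mmap'd backing buffer, and the 1 MiB size are illustrative assumptions, only the ioctl names and the struct fields shown in the hunk plus userspace_addr come from the KVM ABI.)

	/* Hypothetical userspace sketch: register one slot of guest memory. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/kvm.h>

	int main(void)
	{
		struct kvm_userspace_memory_region region;
		int kvm_fd, vm_fd;
		void *backing;

		kvm_fd = open("/dev/kvm", O_RDWR);
		if (kvm_fd < 0) { perror("open /dev/kvm"); return 1; }

		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
		if (vm_fd < 0) { perror("KVM_CREATE_VM"); return 1; }

		/* Anonymous mapping that backs guest physical memory. */
		backing = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (backing == MAP_FAILED) { perror("mmap"); return 1; }

		memset(&region, 0, sizeof(region));
		region.slot = 0;
		region.flags = 0;
		region.guest_phys_addr = 0;
		region.memory_size = 0x100000;
		region.userspace_addr = (unsigned long)backing;

		if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
			perror("KVM_SET_USER_MEMORY_REGION");
			return 1;
		}
		return 0;
	}
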
@@ -3006,7 +3074,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
 
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        page = gfn_to_page(kvm, pgoff);
-       if (!page)
+       if (is_error_page(page))
                return NOPAGE_SIGBUS;
        get_page(page);
        if (type != NULL)
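
(Annotation: with gfn_to_page() now guaranteed to return a page, callers such as kvm_vm_nopage() above test for an error page instead of NULL. The helper's definition is not part of this hunk; the sketch below shows the assumed semantics, consistent with the bad_page allocation added to kvm_init() later in this patch.)

	extern struct page *bad_page;	/* allocated zeroed in kvm_init() */

	/* Assumed shape: an error return from gfn_to_page() is the sentinel page. */
	static inline int is_error_page(struct page *page)
	{
		return page == bad_page;
	}
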
@@ -3072,39 +3140,14 @@ static long kvm_dev_ioctl(struct file *filp,
                        goto out;
                r = kvm_dev_ioctl_create_vm();
                break;
-       case KVM_GET_MSR_INDEX_LIST: {
-               struct kvm_msr_list __user *user_msr_list = argp;
-               struct kvm_msr_list msr_list;
-               unsigned n;
-
-               r = -EFAULT;
-               if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
-                       goto out;
-               n = msr_list.nmsrs;
-               msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
-               if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
-                       goto out;
-               r = -E2BIG;
-               if (n < num_msrs_to_save)
-                       goto out;
-               r = -EFAULT;
-               if (copy_to_user(user_msr_list->indices, &msrs_to_save,
-                                num_msrs_to_save * sizeof(u32)))
-                       goto out;
-               if (copy_to_user(user_msr_list->indices
-                                + num_msrs_to_save * sizeof(u32),
-                                &emulated_msrs,
-                                ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
-                       goto out;
-               r = 0;
-               break;
-       }
        case KVM_CHECK_EXTENSION: {
                int ext = (long)argp;
 
                switch (ext) {
                case KVM_CAP_IRQCHIP:
                case KVM_CAP_HLT:
+               case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
+               case KVM_CAP_USER_MEMORY:
                        r = 1;
                        break;
                default:
@@ -3120,7 +3163,7 @@ static long kvm_dev_ioctl(struct file *filp,
                r = 2 * PAGE_SIZE;
                break;
        default:
-               ;
+               return kvm_arch_dev_ioctl(filp, ioctl, arg);
        }
 out:
        return r;
@@ -3163,7 +3206,7 @@ static void decache_vcpus_on_cpu(int cpu)
                         */
                        if (mutex_trylock(&vcpu->mutex)) {
                                if (vcpu->cpu == cpu) {
-                                       kvm_arch_ops->vcpu_decache(vcpu);
+                                       kvm_x86_ops->vcpu_decache(vcpu);
                                        vcpu->cpu = -1;
                                }
                                mutex_unlock(&vcpu->mutex);
@@ -3179,7 +3222,7 @@ static void hardware_enable(void *junk)
        if (cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_set(cpu, cpus_hardware_enabled);
-       kvm_arch_ops->hardware_enable(NULL);
+       kvm_x86_ops->hardware_enable(NULL);
 }
 
 static void hardware_disable(void *junk)
@@ -3190,7 +3233,7 @@ static void hardware_disable(void *junk)
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
        decache_vcpus_on_cpu(cpu);
-       kvm_arch_ops->hardware_disable(NULL);
+       kvm_x86_ops->hardware_disable(NULL);
 }
 
 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
@@ -3222,7 +3265,7 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
 }
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
-                       void *v)
+                     void *v)
 {
        if (val == SYS_RESTART) {
                /*
@@ -3336,7 +3379,7 @@ static int kvm_resume(struct sys_device *dev)
 }
 
 static struct sysdev_class kvm_sysdev_class = {
-       set_kset_name("kvm"),
+       .name = "kvm",
        .suspend = kvm_suspend,
        .resume = kvm_resume,
 };
@@ -3346,7 +3389,7 @@ static struct sys_device kvm_sysdev = {
        .cls = &kvm_sysdev_class,
 };
 
-hpa_t bad_page_address;
+struct page *bad_page;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -3358,7 +3401,7 @@ static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_arch_ops->vcpu_load(vcpu, cpu);
+       kvm_x86_ops->vcpu_load(vcpu, cpu);
 }
 
 static void kvm_sched_out(struct preempt_notifier *pn,
@@ -3366,16 +3409,16 @@ static void kvm_sched_out(struct preempt_notifier *pn,
 {
        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
 
-       kvm_arch_ops->vcpu_put(vcpu);
+       kvm_x86_ops->vcpu_put(vcpu);
 }
 
-int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
+int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
                  struct module *module)
 {
        int r;
        int cpu;
 
-       if (kvm_arch_ops) {
+       if (kvm_x86_ops) {
                printk(KERN_ERR "kvm: already loaded the other module\n");
                return -EEXIST;
        }
@@ -3389,15 +3432,15 @@ int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
                return -EOPNOTSUPP;
        }
 
-       kvm_arch_ops = ops;
+       kvm_x86_ops = ops;
 
-       r = kvm_arch_ops->hardware_setup();
+       r = kvm_x86_ops->hardware_setup();
        if (r < 0)
                goto out;
 
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
-                               kvm_arch_ops->check_processor_compatibility,
+                               kvm_x86_ops->check_processor_compatibility,
                                &r, 0, 1);
                if (r < 0)
                        goto out_free_0;
@@ -3429,14 +3472,16 @@ int kvm_init_arch(struct kvm_arch_ops *ops, unsigned int vcpu_size,
 
        r = misc_register(&kvm_dev);
        if (r) {
-               printk (KERN_ERR "kvm: misc device register failed\n");
+               printk(KERN_ERR "kvm: misc device register failed\n");
                goto out_free;
        }
 
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
-       return r;
+       kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
+
+       return 0;
 
 out_free:
        kmem_cache_destroy(kvm_vcpu_cache);
@@ -3450,13 +3495,14 @@ out_free_2:
 out_free_1:
        on_each_cpu(hardware_disable, NULL, 0, 1);
 out_free_0:
-       kvm_arch_ops->hardware_unsetup();
+       kvm_x86_ops->hardware_unsetup();
 out:
-       kvm_arch_ops = NULL;
+       kvm_x86_ops = NULL;
        return r;
 }
+EXPORT_SYMBOL_GPL(kvm_init_x86);
 
-void kvm_exit_arch(void)
+void kvm_exit_x86(void)
 {
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
@@ -3465,13 +3511,13 @@ void kvm_exit_arch(void)
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
        on_each_cpu(hardware_disable, NULL, 0, 1);
-       kvm_arch_ops->hardware_unsetup();
-       kvm_arch_ops = NULL;
+       kvm_x86_ops->hardware_unsetup();
+       kvm_x86_ops = NULL;
 }
+EXPORT_SYMBOL_GPL(kvm_exit_x86);
 
 static __init int kvm_init(void)
 {
-       static struct page *bad_page;
        int r;
 
        r = kvm_mmu_module_init();
@@ -3480,16 +3526,15 @@ static __init int kvm_init(void)
 
        kvm_init_debug();
 
-       kvm_init_msr_list();
+       kvm_arch_init();
 
-       if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
+       bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+       if (bad_page == NULL) {
                r = -ENOMEM;
                goto out;
        }
 
-       bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
-       memset(__va(bad_page_address), 0, PAGE_SIZE);
-
        return 0;
 
 out:
@@ -3502,12 +3547,9 @@ out4:
 static __exit void kvm_exit(void)
 {
        kvm_exit_debug();
-       __free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
+       __free_page(bad_page);
        kvm_mmu_module_exit();
 }
 
 module_init(kvm_init)
 module_exit(kvm_exit)
-
-EXPORT_SYMBOL_GPL(kvm_init_arch);
-EXPORT_SYMBOL_GPL(kvm_exit_arch);