KVM: MMU: Concurrent guest walkers
virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

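/*
 * Illustrative sketch (not in the original file): callers bracket all
 * access to guest state with this pair, so the vcpu mutex is held and
 * the right arch state is resident in between, e.g.
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
 *	vcpu_put(vcpu);
 */
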
static void ack_flush(void *_completed)
{
}

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	if (cpus_empty(cpus))
		return;
	++kvm->stat.remote_tlb_flush;
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

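/*
 * Sketch of the consumer side (arch code, simplified assumption): before
 * re-entering the guest, a vcpu tests and clears the request bit set
 * above, e.g.
 *
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		kvm_x86_ops->tlb_flush(vcpu);
 *
 * The empty ack_flush() IPI exists only to kick remote cpus out of guest
 * mode so they notice the pending request.
 */
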
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm->mm = current->mm;
	atomic_inc(&kvm->mm->mm_count);
	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	struct mm_struct *mm = kvm->mm;

	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
	mmdrop(mm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding mmap_sem for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		new.userspace_addr = mem->userspace_addr;
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;

	r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
	if (r) {
		*memslot = old;
		goto out_free;
	}

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	down_write(&current->mm->mmap_sem);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	up_write(&current->mm->mmap_sem);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

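/*
 * Illustrative userspace sketch (assumed caller, not part of this file):
 * guest memory is registered with the KVM_SET_USER_MEMORY_REGION vm ioctl,
 * which reaches kvm_vm_ioctl_set_memory_region() below:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,        // page-aligned
 *		.userspace_addr  = (__u64)mem,  // from mmap()
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */
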
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

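/*
 * Worked example (hypothetical numbers): for a slot with base_gfn 0x100,
 * npages 0x200 and userspace_addr 0x7f0000000000, gfn 0x180 yields
 * hva = 0x7f0000000000 + (0x180 - 0x100) * 4096 = 0x7f0000080000.
 */
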
/*
 * Requires current->mm->mmap_sem to be held
 */
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_page_dirty(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

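/*
 * Example (hypothetical values): a 6000-byte read starting at gpa 0x1f00
 * is split by next_segment() into 256 bytes (up to the first page
 * boundary), then 4096, then the remaining 1648, each chunk handled by
 * kvm_read_guest_page() on consecutive gfns.
 */
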
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && !kvm_arch_vcpu_runnable(vcpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

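/*
 * Sketch of the wake-up side (assumed, simplified): code that injects an
 * interrupt into a halted vcpu wakes the queue slept on above, e.g.
 *
 *	if (waitqueue_active(&vcpu->wq))
 *		wake_up_interruptible(&vcpu->wq);
 */
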
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
	else
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

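/*
 * Illustrative userspace sketch (assumed caller): the vcpu fd is mmapped
 * to reach struct kvm_run at page 0 (the PIO page sits behind it), which
 * the fault handler above services:
 *
 *	size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run  = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, 0);
 */
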
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	r = kvm_arch_vcpu_setup(vcpu);
	if (r)
		goto vcpu_destroy;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destroy(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: p is NULL when no argument was
		 * supplied, and sigset would be uninitialized then */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
				   sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm *kvm = vma->vm_file->private_data;
	struct page *page;

	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
		return VM_FAULT_SIGBUS;
	page = gfn_to_page(kvm, vmf->pgoff);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return VM_FAULT_SIGBUS;
	}
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

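/*
 * Illustrative userspace sketch (assumed caller): the vm fd returned here
 * comes from the KVM_CREATE_VM ioctl on /dev/kvm, and vcpus hang off it:
 *
 *	kvm_fd  = open("/dev/kvm", O_RDWR);
 *	vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 */
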
static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

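/*
 * Sketch of a caller (assumed, simplified): MMIO emulation looks up the
 * device that claims a guest physical address and forwards the access
 * through the device's own ops, e.g.
 *
 *	struct kvm_io_device *dev;
 *
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, gpa);
 *	if (dev)
 *		dev->write(dev, gpa, bytes, val);
 */
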
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_1;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0:
	__free_page(bad_page);
out:
	kvm_arch_exit();
	kvm_exit_debug();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

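/*
 * Illustrative sketch (assumed arch module, e.g. kvm-intel): the arch
 * module's init routine passes its opaque ops and vcpu size here:
 *
 *	static int __init vmx_init(void)
 *	{
 *		return kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
 *				THIS_MODULE);
 *	}
 */
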
void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);