KVM: Remove unused "rmap_overflow" variable
linux-2.6-block.git: drivers/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86.h"
#include "irq.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/pgtable.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

static struct dentry *debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_arch_vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

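/*
 * Mark every vcpu as needing a TLB flush and IPI the physical cpus that are
 * currently running a vcpu, so the request is noticed before the next guest
 * entry.
 */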
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
	struct page *page;
	int r;

	mutex_init(&vcpu->mutex);
	vcpu->cpu = -1;
	vcpu->kvm = kvm;
	vcpu->vcpu_id = id;
	init_waitqueue_head(&vcpu->wq);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->run = page_address(page);

	r = kvm_arch_vcpu_init(vcpu);
	if (r < 0)
		goto fail_free_run;
	return 0;

fail_free_run:
	free_page((unsigned long)vcpu->run);
fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_uninit(vcpu);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

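/*
 * Allocate a new vm via the arch-specific constructor, initialize its I/O
 * buses and lock, and add it to the global vm_list.
 */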
static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kvm_arch_create_vm();

	if (IS_ERR(kvm))
		goto out;

	kvm_io_bus_init(&kvm->pio_bus);
	mutex_init(&kvm->lock);
	kvm_io_bus_init(&kvm->mmio_bus);
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
out:
	return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	if (!dont || free->rmap != dont->rmap)
		vfree(free->rmap);

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->npages = 0;
	free->dirty_bitmap = NULL;
	free->rmap = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_arch_destroy_vm(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->lock.
 */
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_free;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_free;
	}

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.rmap) {
		new.rmap = vmalloc(npages * sizeof(struct page *));

		if (!new.rmap)
			goto out_free;

		memset(new.rmap, 0, npages * sizeof(*new.rmap));

		new.user_alloc = user_alloc;
		if (user_alloc)
			new.userspace_addr = mem->userspace_addr;
		else {
			down_write(&current->mm->mmap_sem);
			new.userspace_addr = do_mmap(NULL, 0,
						     npages * PAGE_SIZE,
						     PROT_READ | PROT_WRITE,
						     MAP_SHARED | MAP_ANONYMOUS,
						     0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)new.userspace_addr))
				goto out_free;
		}
	} else {
		if (!old.user_alloc && old.rmap) {
			int ret;

			down_write(&current->mm->mmap_sem);
			ret = do_munmap(current->mm, old.userspace_addr,
					old.npages * PAGE_SIZE);
			up_write(&current->mm->mmap_sem);
			if (ret < 0)
				printk(KERN_WARNING
				       "kvm_vm_ioctl_set_memory_region: "
				       "failed to munmap memory\n");
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc)
{
	int r;

	mutex_lock(&kvm->lock);
	r = __kvm_set_memory_region(kvm, mem, user_alloc);
	mutex_unlock(&kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	if (mem->slot >= KVM_MEMORY_SLOTS)
		return -EINVAL;
	return kvm_set_memory_region(kvm, mem, user_alloc);
}

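/*
 * Copy the dirty bitmap of a memory slot out to userspace and report whether
 * any bit in it is set.
 */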
int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	if (any)
		*is_dirty = 1;

	r = 0;
out:
	return r;
}

int is_error_page(struct page *page)
{
	return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

static inline unsigned long bad_hva(void)
{
	return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
	return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

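/*
 * Translate a guest frame number through the vm's alias table; frames that
 * fall inside an alias region are redirected to the alias target.
 */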
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

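/*
 * Translate a guest frame number to the userspace virtual address of its
 * backing memory, or bad_hva() if the frame is not in any slot.
 */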
static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return bad_hva();
	return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}

/*
 * Requires current->mm->mmap_sem to be held
 */
static struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page[1];
	unsigned long addr;
	int npages;

	might_sleep();

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return bad_page;
	}

	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
				NULL);

	if (npages != 1) {
		get_page(bad_page);
		return bad_page;
	}

	return page[0];
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = __gfn_to_page(kvm, gfn);
	up_read(&current->mm->mmap_sem);

	return page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page(struct page *page)
{
	if (!PageReserved(page))
		SetPageDirty(page);
	put_page(page);
}
EXPORT_SYMBOL_GPL(kvm_release_page);

static int next_segment(unsigned long len, int offset)
{
	if (len > PAGE_SIZE - offset)
		return PAGE_SIZE - offset;
	else
		return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_from_user(data, (void __user *)addr + offset, len);
	if (r)
		return -EFAULT;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

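/*
 * Read a possibly page-crossing range of guest memory by splitting it into
 * per-page segments and copying each one separately.
 */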
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len)
{
	int r;
	unsigned long addr;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	r = copy_to_user((void __user *)addr + offset, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, gfn);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		++gfn;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

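/*
 * Set the bit for @gfn in its slot's dirty bitmap, if dirty logging is
 * enabled for that slot.
 */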
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	gfn = unalias_gfn(kvm, gfn);
	memslot = __gfn_to_memslot(kvm, gfn);
	if (memslot && memslot->dirty_bitmap) {
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		/* avoid RMW */
		if (!test_bit(rel_gfn, memslot->dirty_bitmap))
			set_bit(rel_gfn, memslot->dirty_bitmap);
	}
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}

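/*
 * Back the vcpu fd's mapping: page offset 0 is the kvm_run structure and
 * KVM_PIO_PAGE_OFFSET is the pio data page.
 */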
static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;

	if (!valid_vcpu(n))
		return -EINVAL;

	vcpu = kvm_arch_vcpu_create(kvm, n);
	if (IS_ERR(vcpu))
		return PTR_ERR(vcpu);

	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		mutex_unlock(&kvm->lock);
		goto vcpu_destroy;
	}
	kvm->vcpus[n] = vcpu;
	mutex_unlock(&kvm->lock);

	/* Now it's all set up, let userspace reach it */
	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto unlink;
	return r;

unlink:
	mutex_lock(&kvm->lock);
	kvm->vcpus[n] = NULL;
	mutex_unlock(&kvm->lock);
vcpu_destroy:
	kvm_arch_vcpu_destory(vcpu);
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

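/*
 * Dispatch ioctls issued on a vcpu fd; anything not handled here is passed
 * on to the arch-specific handler.
 */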
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass NULL when no mask was supplied, so the mask is cleared */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

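/*
 * Back the vm fd's mapping: the page offset is interpreted as a guest frame
 * number and resolved through the memory slots.
 */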
static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (!kvm_is_visible_gfn(kvm, pgoff))
		return NOPAGE_SIGBUS;
	/* current->mm->mmap_sem is already held so call lockless version */
	page = __gfn_to_page(kvm, pgoff);
	if (is_error_page(page)) {
		kvm_release_page(page);
		return NOPAGE_SIGBUS;
	}
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

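/*
 * Create a new vm and return a file descriptor referring to it.
 */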
static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_CHECK_EXTENSION:
		r = kvm_dev_ioctl_check_extension((long)argp);
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		return kvm_arch_dev_ioctl(filp, ioctl, arg);
	}
out:
	return r;
}

static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

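/*
 * Enable/disable hardware virtualization on the current cpu, tracking which
 * cpus already have it enabled so each operation is done only once per cpu.
 */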
static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_hardware_disable(NULL);
}

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	val &= ~CPU_TASKS_FROZEN;
	switch (val) {
	case CPU_DYING:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};

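/*
 * A kvm_io_bus is a small fixed-size table of in-kernel I/O devices; lookups
 * are by address via each device's in_range() callback.
 */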
void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

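/*
 * debugfs accessors: sum a per-vm or per-vcpu counter, identified by its
 * offset into the structure, over all vms on vm_list.
 */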
static u64 vm_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		total += *(u32 *)((void *)kvm + offset);
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");

static u64 vcpu_stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (vcpu)
				total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");

static struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};

static void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						stat_fops[p->kind]);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

struct page *bad_page;

static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}

static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}

static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}

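/*
 * Common module initialization: set up debugfs, the arch hooks, hardware
 * virtualization on each online cpu, cpu hotplug/reboot/suspend notifiers,
 * the vcpu slab cache and the /dev/kvm misc device.
 */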
int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module)
{
	int r;
	int cpu;

	kvm_init_debug();

	r = kvm_arch_init(opaque);
	if (r)
		goto out4;

	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out;

	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu),
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	return 0;

out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_arch_hardware_unsetup();
out:
	kvm_arch_exit();
	kvm_exit_debug();
out4:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);

void kvm_exit(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_hardware_unsetup();
	kvm_arch_exit();
	kvm_exit_debug();
	__free_page(bad_page);
}
EXPORT_SYMBOL_GPL(kvm_exit);