/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "kvm.h"
#include "x86_emulate.h"
#include "segment_descriptor.h"

#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/desc.h>

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(kvm_lock);
static LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kvm_arch_ops *kvm_arch_ops;

static void hardware_disable(void *ignored);

#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)

static struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	struct dentry *dentry;
} debugfs_entries[] = {
	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
	{ "pf_guest", STAT_OFFSET(pf_guest) },
	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
	{ "invlpg", STAT_OFFSET(invlpg) },
	{ "exits", STAT_OFFSET(exits) },
	{ "io_exits", STAT_OFFSET(io_exits) },
	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
	{ "signal_exits", STAT_OFFSET(signal_exits) },
	{ "irq_window", STAT_OFFSET(irq_window_exits) },
	{ "halt_exits", STAT_OFFSET(halt_exits) },
	{ "request_irq", STAT_OFFSET(request_irq_exits) },
	{ "irq_exits", STAT_OFFSET(irq_exits) },
	{ "light_exits", STAT_OFFSET(light_exits) },
	{ "efer_reload", STAT_OFFSET(efer_reload) },
	{ NULL }
};

static struct dentry *debugfs_dir;

#define MAX_IO_MSRS 256

#define CR0_RESEVED_BITS 0xffffffff1ffaffc0ULL
#define LMSW_GUEST_MASK 0x0eULL
#define CR4_RESEVED_BITS (~((1ULL << 11) - 1))
#define CR8_RESEVED_BITS (~0x0fULL)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
#ifdef CONFIG_X86_64
/* LDT or TSS descriptor in the GDT.  16 bytes. */
struct segment_descriptor_64 {
	struct segment_descriptor s;
	u32 base_higher;
	u32 pad_zero;
};

#endif

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
			   unsigned long arg);

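/*
 * Fetch the linear base address encoded in a GDT/LDT descriptor.  On
 * x86 the 32-bit base is scattered over three descriptor fields
 * (base_low, base_mid, base_high); 64-bit system descriptors carry an
 * additional 32 high bits in base_higher (struct segment_descriptor_64
 * above), folded in under CONFIG_X86_64.
 */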
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

static inline int valid_vcpu(int n)
{
	return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

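/*
 * Copy up to @size bytes from guest virtual address @addr into @dest,
 * translating and mapping one page at a time (gva_to_hpa + kmap_atomic)
 * and stopping early at the first unmapped page.  Returns the number
 * of bytes actually copied.
 */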
int kvm_read_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		   void *dest)
{
	unsigned char *host_buf = dest;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT),
			KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy(host_buf, (void *)guest_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
		    void *data)
{
	unsigned char *host_buf = data;
	unsigned long req_size = size;

	while (size) {
		hpa_t paddr;
		unsigned now;
		unsigned offset;
		hva_t guest_buf;
		gfn_t gfn;

		paddr = gva_to_hpa(vcpu, addr);

		if (is_error_hpa(paddr))
			break;

		gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
		mark_page_dirty(vcpu->kvm, gfn);
		guest_buf = (hva_t)kmap_atomic(
			pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
		offset = addr & ~PAGE_MASK;
		guest_buf |= offset;
		now = min(size, PAGE_SIZE - offset);
		memcpy((void *)guest_buf, host_buf, now);
		host_buf += now;
		addr += now;
		size -= now;
		kunmap_atomic((void *)(guest_buf & PAGE_MASK), KM_USER0);
	}
	return req_size - size;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);

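/*
 * Lazy FPU switching: the guest's fx image is loaded only when the
 * guest actually uses the FPU (fpu_active); the host image saved here
 * is restored by the matching kvm_put_guest_fpu() below.
 */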
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(vcpu->host_fx_image);
	fx_restore(vcpu->guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->mutex);
	kvm_arch_ops->vcpu_load(vcpu);
}

static void vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->vcpu_put(vcpu);
	mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
	atomic_t *completed = _completed;

	atomic_inc(completed);
}

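/*
 * Make every vcpu that may hold stale translations flush its TLB:
 * setting KVM_TLB_FLUSH in vcpu->requests causes the flush on the
 * vcpu's next guest entry, and the IPI sent below kicks vcpus that
 * are currently in guest mode out of it.  We then spin until all
 * recipients have acked.
 */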
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu, needed;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;
	atomic_t completed;

	atomic_set(&completed, 0);
	cpus_clear(cpus);
	needed = 0;
	for (i = 0; i < kvm->nvcpus; ++i) {
		vcpu = &kvm->vcpus[i];
		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		if (cpu != -1 && cpu != raw_smp_processor_id())
			if (!cpu_isset(cpu, cpus)) {
				cpu_set(cpu, cpus);
				++needed;
			}
	}

	/*
	 * We really want smp_call_function_mask() here.  But that's not
	 * available, so ipi all cpus in parallel and wait for them
	 * to complete.
	 */
	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
	while (atomic_read(&completed) != needed) {
		cpu_relax();
		barrier();
	}
}

static struct kvm *kvm_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	int i;

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	kvm_io_bus_init(&kvm->pio_bus);
	spin_lock_init(&kvm->lock);
	INIT_LIST_HEAD(&kvm->active_mmu_pages);
	kvm_io_bus_init(&kvm->mmio_bus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		struct kvm_vcpu *vcpu = &kvm->vcpus[i];

		mutex_init(&vcpu->mutex);
		vcpu->cpu = -1;
		vcpu->kvm = kvm;
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
	spin_lock(&kvm_lock);
	list_add(&kvm->vm_list, &vm_list);
	spin_unlock(&kvm_lock);
	return kvm;
}

static int kvm_dev_open(struct inode *inode, struct file *filp)
{
	return 0;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
				  struct kvm_memory_slot *dont)
{
	int i;

	if (!dont || free->phys_mem != dont->phys_mem)
		if (free->phys_mem) {
			for (i = 0; i < free->npages; ++i)
				if (free->phys_mem[i])
					__free_page(free->phys_mem[i]);
			vfree(free->phys_mem);
		}

	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
		vfree(free->dirty_bitmap);

	free->phys_mem = NULL;
	free->npages = 0;
	free->dirty_bitmap = NULL;
}

static void kvm_free_physmem(struct kvm *kvm)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i)
		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < 2; ++i)
		if (vcpu->pio.guest_pages[i]) {
			__free_page(vcpu->pio.guest_pages[i]);
			vcpu->pio.guest_pages[i] = NULL;
		}
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->vmcs)
		return;

	vcpu_load(vcpu);
	kvm_mmu_destroy(vcpu);
	vcpu_put(vcpu);
	kvm_arch_ops->vcpu_free(vcpu);
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
	free_page((unsigned long)vcpu->pio_data);
	vcpu->pio_data = NULL;
	free_pio_guest_pages(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_unload_vcpu_mmu(&kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		kvm_free_vcpu(&kvm->vcpus[i]);
}

static int kvm_dev_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
	struct kvm *kvm = filp->private_data;

	kvm_destroy_vm(kvm);
	return 0;
}

static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_arch_ops->inject_gp(vcpu, 0);
}

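/*
 * In PAE mode cr3 points at a 32-byte-aligned block of four 64-bit
 * pdptes; the "((cr3 & (PAGE_SIZE-1)) >> 5) << 2" below converts
 * cr3's offset within its page into an index into the page viewed
 * as an array of u64s.
 */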
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	u64 pdpte;
	u64 *pdpt;
	int ret;
	struct page *page;

	spin_lock(&vcpu->kvm->lock);
	page = gfn_to_page(vcpu->kvm, pdpt_gfn);
	/* FIXME: !page - emulate? 0xff? */
	pdpt = kmap_atomic(page, KM_USER0);

	ret = 1;
	for (i = 0; i < 4; ++i) {
		pdpte = pdpt[offset + i];
		if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}

	for (i = 0; i < 4; ++i)
		vcpu->pdptrs[i] = pdpt[offset + i];

out:
	kunmap_atomic(pdpt, KM_USER0);
	spin_unlock(&vcpu->kvm->lock);

	return ret;
}

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_NW_MASK) && !(cr0 & CR0_CD_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & CR0_PG_MASK) && !(cr0 & CR0_PE_MASK)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & CR0_PG_MASK)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}

	}

	kvm_arch_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & CR4_PAE_MASK)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
	}

	if (cr4 & CR4_VMXE_MASK) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_arch_ops->set_cr4(vcpu, cr4);
	spin_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (cr3 & CR3_RESEVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
		if (is_paging(vcpu) && is_pae(vcpu) &&
		    !load_pdptrs(vcpu, cr3)) {
			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	}

	vcpu->cr3 = cr3;
	spin_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else
		vcpu->mmu.new_cr3(vcpu);
	spin_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESEVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		inject_gp(vcpu);
		return;
	}
	vcpu->cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

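/*
 * Reset the guest FPU image to architectural power-on defaults
 * (0x1f80 is the reset value of MXCSR) while preserving the host's
 * FPU state around the fpu_init() call.
 */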
void fx_init(struct kvm_vcpu *vcpu)
{
	struct __attribute__ ((__packed__)) fx_image_s {
		u16 control;	/* fcw */
		u16 status;	/* fsw */
		u16 tag;	/* ftw */
		u16 opcode;	/* fop */
		u64 ip;		/* fpu ip */
		u64 operand;	/* fpu dp */
		u32 mxcsr;
		u32 mxcsr_mask;

	} *fx_image;

	fx_save(vcpu->host_fx_image);
	fpu_init();
	fx_save(vcpu->guest_fx_image);
	fx_restore(vcpu->host_fx_image);

	fx_image = (struct fx_image_s *)vcpu->guest_fx_image;
	fx_image->mxcsr = 0x1f80;
	memset(vcpu->guest_fx_image + sizeof(struct fx_image_s),
	       0, FX_IMAGE_SIZE - sizeof(struct fx_image_s));
}
EXPORT_SYMBOL_GPL(fx_init);

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;
	int memory_config_version;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

raced:
	spin_lock(&kvm->lock);

	memory_config_version = kvm->memory_config_version;
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}
	/*
	 * Do memory allocations outside lock.  memory_config_version will
	 * detect any races.
	 */
	spin_unlock(&kvm->lock);

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_free;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			if (!new.phys_mem[i])
				goto out_free;
			set_page_private(new.phys_mem[i], 0);
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_free;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	spin_lock(&kvm->lock);

	if (memory_config_version != kvm->memory_config_version) {
		spin_unlock(&kvm->lock);
		kvm_free_physmem_slot(&new, &old);
		goto raced;
	}

	r = -EAGAIN;
	if (kvm->busy)
		goto out_unlock;

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	*memslot = new;
	++kvm->memory_config_version;

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	spin_unlock(&kvm->lock);

	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	spin_unlock(&kvm->lock);
out_free:
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	spin_lock(&kvm->lock);

	/*
	 * Prevent changes to guest memory configuration even while the lock
	 * is not taken.
	 */
	++kvm->busy;
	spin_unlock(&kvm->lock);
	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	for (i = 0; !any && i < n / sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	spin_lock(&kvm->lock);
	kvm_mmu_slot_remove_write_access(kvm, log->slot);
	kvm_flush_remote_tlbs(kvm);
	memset(memslot->dirty_bitmap, 0, n);
	spin_unlock(&kvm->lock);

	r = 0;

out:
	spin_lock(&kvm->lock);
	--kvm->busy;
	spin_unlock(&kvm->lock);
	return r;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	spin_lock(&kvm->lock);

	p = &kvm->aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->aliases[n - 1].npages)
			break;
	kvm->naliases = n;

	kvm_mmu_zap_all(kvm);

	spin_unlock(&kvm->lock);

	return 0;

out:
	return r;
}

static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->naliases; ++i) {
		alias = &kvm->aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	int i;

	for (i = 0; i < kvm->nmemslots; ++i) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			return memslot;
	}
	return NULL;
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
	gfn = unalias_gfn(kvm, gfn);
	return __gfn_to_memslot(kvm, gfn);
}

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	gfn = unalias_gfn(kvm, gfn);
	slot = __gfn_to_memslot(kvm, gfn);
	if (!slot)
		return NULL;
	return slot->phys_mem[gfn - slot->base_gfn];
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memory_slot *memslot;
	unsigned long rel_gfn;

	for (i = 0; i < kvm->nmemslots; ++i) {
		memslot = &kvm->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages) {

			if (!memslot->dirty_bitmap)
				return;

			rel_gfn = gfn - memslot->base_gfn;

			/* avoid RMW */
			if (!test_bit(rel_gfn, memslot->dirty_bitmap))
				set_bit(rel_gfn, memslot->dirty_bitmap);
			return;
		}
	}
}

static int emulator_read_std(unsigned long addr,
			     void *val,
			     unsigned int bytes,
			     struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	void *data = val;

	while (bytes) {
		gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		unsigned long pfn;
		struct page *page;
		void *page_virt;

		if (gpa == UNMAPPED_GVA)
			return X86EMUL_PROPAGATE_FAULT;
		pfn = gpa >> PAGE_SHIFT;
		page = gfn_to_page(vcpu->kvm, pfn);
		if (!page)
			return X86EMUL_UNHANDLEABLE;
		page_virt = kmap_atomic(page, KM_USER0);

		memcpy(data, page_virt + offset, tocopy);

		kunmap_atomic(page_virt, KM_USER0);

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}

	return X86EMUL_CONTINUE;
}

static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct x86_emulate_ctxt *ctxt)
{
	printk(KERN_ERR "emulator_write_std: addr %lx n %d\n",
	       addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}

static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	/*
	 * Note that it's important to have this wrapper function because
	 * in the very near future we will be checking for MMIOs against
	 * the LAPIC as well as the general MMIO bus.
	 */
	return kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}

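/*
 * Emulated guest reads: ordinary RAM is read via emulator_read_std();
 * a physical address claimed by an in-kernel device model is handled
 * on the mmio bus; anything else is punted to userspace by flagging
 * mmio_needed, which ends in a KVM_EXIT_MMIO exit.
 */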
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, ctxt)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

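/*
 * Write directly into guest RAM.  kvm_mmu_pte_write() is invoked first
 * so that a guest writing its own page tables keeps the shadow page
 * tables coherent.  Returns 0 when the write cannot be completed here
 * (it crosses a page boundary or does not target RAM).
 */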
static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	struct page *page;
	void *virt;
	unsigned offset = offset_in_page(gpa);

	if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
		return 0;
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (!page)
		return 0;
	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
	virt = kmap_atomic(page, KM_USER0);
	kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
	memcpy(virt + offset_in_page(gpa), val, bytes);
	kunmap_atomic(virt, KM_USER0);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct x86_emulate_ctxt *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_arch_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

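/*
 * Split an emulated write that straddles a page boundary into two
 * single-page writes; "-addr & ~PAGE_MASK" is the number of bytes
 * remaining in addr's page.
 */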
static int emulator_write_emulated(unsigned long addr,
				   const void *val,
				   unsigned int bytes,
				   struct x86_emulate_ctxt *ctxt)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, ctxt);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, ctxt);
}

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct x86_emulate_ctxt *ctxt)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, ctxt);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_arch_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	unsigned long cr0;

	cr0 = vcpu->cr0 & ~CR0_TS_MASK;
	kvm_arch_ops->set_cr0(vcpu, cr0);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_arch_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_arch_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

static void report_emulation_failure(struct x86_emulate_ctxt *ctxt)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = ctxt->vcpu->rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(ctxt->vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, ctxt);

	printk(KERN_ERR "emulation failed but !mmio_needed?"
	       " rip %lx %02x %02x %02x %02x\n",
	       rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}

struct x86_emulate_ops emulate_ops = {
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};

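/*
 * Emulate one guest instruction.  The emulation mode is derived from
 * the current guest state: EFLAGS.VM selects real/vm86 mode, CS.L
 * selects 64-bit mode, and otherwise CS.D chooses between 32-bit and
 * 16-bit protected mode.
 */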
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code)
{
	struct x86_emulate_ctxt emulate_ctxt;
	int r;
	int cs_db, cs_l;

	vcpu->mmio_fault_cr2 = cr2;
	kvm_arch_ops->cache_regs(vcpu);

	kvm_arch_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

	emulate_ctxt.vcpu = vcpu;
	emulate_ctxt.eflags = kvm_arch_ops->get_rflags(vcpu);
	emulate_ctxt.cr2 = cr2;
	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
		? X86EMUL_MODE_REAL : cs_l
		? X86EMUL_MODE_PROT64 : cs_db
		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
		emulate_ctxt.cs_base = 0;
		emulate_ctxt.ds_base = 0;
		emulate_ctxt.es_base = 0;
		emulate_ctxt.ss_base = 0;
	} else {
		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
	}

	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);

	vcpu->mmio_is_write = 0;
	r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);

	if ((r || vcpu->mmio_is_write) && run) {
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			report_emulation_failure(&emulate_ctxt);
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->set_rflags(vcpu, emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	if (vcpu->irq_summary)
		return 1;

	vcpu->run->exit_reason = KVM_EXIT_HLT;
	++vcpu->stat.halt_exits;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

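/*
 * Dispatch a guest hypercall.  In long mode the number and arguments
 * arrive in rax, rdi, rsi, rdx, rcx, r8 and r9; 32-bit guests use rbx
 * for the number and rax, rcx, rdx, rsi, rdi, rbp for the arguments,
 * truncated to 32 bits.  Anything not handled here is forwarded to
 * userspace through the run->hypercall block (return value 0).
 */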
int kvm_hypercall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long nr, a0, a1, a2, a3, a4, a5, ret;

	kvm_arch_ops->cache_regs(vcpu);
	ret = -KVM_EINVAL;
#ifdef CONFIG_X86_64
	if (is_long_mode(vcpu)) {
		nr = vcpu->regs[VCPU_REGS_RAX];
		a0 = vcpu->regs[VCPU_REGS_RDI];
		a1 = vcpu->regs[VCPU_REGS_RSI];
		a2 = vcpu->regs[VCPU_REGS_RDX];
		a3 = vcpu->regs[VCPU_REGS_RCX];
		a4 = vcpu->regs[VCPU_REGS_R8];
		a5 = vcpu->regs[VCPU_REGS_R9];
	} else
#endif
	{
		nr = vcpu->regs[VCPU_REGS_RBX] & -1u;
		a0 = vcpu->regs[VCPU_REGS_RAX] & -1u;
		a1 = vcpu->regs[VCPU_REGS_RCX] & -1u;
		a2 = vcpu->regs[VCPU_REGS_RDX] & -1u;
		a3 = vcpu->regs[VCPU_REGS_RSI] & -1u;
		a4 = vcpu->regs[VCPU_REGS_RDI] & -1u;
		a5 = vcpu->regs[VCPU_REGS_RBP] & -1u;
	}
	switch (nr) {
	default:
		run->hypercall.args[0] = a0;
		run->hypercall.args[1] = a1;
		run->hypercall.args[2] = a2;
		run->hypercall.args[3] = a3;
		run->hypercall.args[4] = a4;
		run->hypercall.args[5] = a5;
		run->hypercall.ret = ret;
		run->hypercall.longmode = is_long_mode(vcpu);
		kvm_arch_ops->decache_regs(vcpu);
		return 0;
	}
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_arch_ops->decache_regs(vcpu);
	return 1;
}
EXPORT_SYMBOL_GPL(kvm_hypercall);

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_arch_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_arch_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_arch_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		*rflags = kvm_arch_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

/*
 * Register the para guest with the host:
 */
static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
{
	struct kvm_vcpu_para_state *para_state;
	hpa_t para_state_hpa, hypercall_hpa;
	struct page *para_state_page;
	unsigned char *hypercall;
	gpa_t hypercall_gpa;

	printk(KERN_DEBUG "kvm: guest trying to enter paravirtual mode\n");
	printk(KERN_DEBUG ".... para_state_gpa: %08Lx\n", para_state_gpa);

	/*
	 * Needs to be page aligned:
	 */
	if (para_state_gpa != PAGE_ALIGN(para_state_gpa))
		goto err_gp;

	para_state_hpa = gpa_to_hpa(vcpu, para_state_gpa);
	printk(KERN_DEBUG ".... para_state_hpa: %08Lx\n", para_state_hpa);
	if (is_error_hpa(para_state_hpa))
		goto err_gp;

	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
	para_state = kmap_atomic(para_state_page, KM_USER0);

	printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
	printk(KERN_DEBUG ".... size: %d\n", para_state->size);

	para_state->host_version = KVM_PARA_API_VERSION;
	/*
	 * We cannot support guests that try to register themselves
	 * with a newer API version than the host supports:
	 */
	if (para_state->guest_version > KVM_PARA_API_VERSION) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	hypercall_gpa = para_state->hypercall_gpa;
	hypercall_hpa = gpa_to_hpa(vcpu, hypercall_gpa);
	printk(KERN_DEBUG ".... hypercall_hpa: %08Lx\n", hypercall_hpa);
	if (is_error_hpa(hypercall_hpa)) {
		para_state->ret = -KVM_EINVAL;
		goto err_kunmap_skip;
	}

	printk(KERN_DEBUG "kvm: para guest successfully registered.\n");
	vcpu->para_state_page = para_state_page;
	vcpu->para_state_gpa = para_state_gpa;
	vcpu->hypercall_gpa = hypercall_gpa;

	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
	kvm_arch_ops->patch_hypercall(vcpu, hypercall);
	kunmap_atomic(hypercall, KM_USER1);

	para_state->ret = 0;
err_kunmap_skip:
	kunmap_atomic(para_state, KM_USER0);
	return 0;
err_gp:
	return 1;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = vcpu->apic_base;
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		printk(KERN_ERR "kvm: unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_arch_ops->get_msr(vcpu, msr_index, pdata);
}

#ifdef CONFIG_X86_64

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		inject_gp(vcpu);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		inject_gp(vcpu);
		return;
	}

	kvm_arch_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->shadow_efer & EFER_LMA;

	vcpu->shadow_efer = efer;
}

#endif

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		printk(KERN_WARNING "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
		       __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		vcpu->apic_base = data;
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->ia32_misc_enable_msr = data;
		break;
	/*
	 * This is the 'probe whether the host is KVM' logic:
	 */
	case MSR_KVM_API_MAGIC:
		return vcpu_register_para(vcpu, data);

	default:
		printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_arch_ops->set_msr(vcpu, msr_index, data);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (!need_resched())
		return;
	vcpu_put(vcpu);
	cond_resched();
	vcpu_load(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_resched);

void load_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		wrmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(load_msrs);

void save_msrs(struct vmx_msr_entry *e, int n)
{
	int i;

	for (i = 0; i < n; ++i)
		rdmsrl(e[i].index, e[i].data);
}
EXPORT_SYMBOL_GPL(save_msrs);

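/*
 * Emulate CPUID from the userspace-supplied cpuid_entries table: an
 * exact function match wins, otherwise the highest entry of the same
 * class is used; "((e->function ^ function) & 0x80000000) == 0"
 * distinguishes the basic from the extended CPUID range.
 */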
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function;
	struct kvm_cpuid_entry *e, *best;

	kvm_arch_ops->cache_regs(vcpu);
	function = vcpu->regs[VCPU_REGS_RAX];
	vcpu->regs[VCPU_REGS_RAX] = 0;
	vcpu->regs[VCPU_REGS_RBX] = 0;
	vcpu->regs[VCPU_REGS_RCX] = 0;
	vcpu->regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == function) {
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->regs[VCPU_REGS_RAX] = best->eax;
		vcpu->regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_arch_ops->decache_regs(vcpu);
	kvm_arch_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

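/*
 * Copy string-PIO data between the vcpu's pio_data scratch page and
 * the pinned guest pages, mapped contiguously with vmap(): "ins"
 * copies towards the guest, "outs" copies from it.
 */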
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;

	kvm_arch_ops->vcpu_put(vcpu);
	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		kvm_arch_ops->vcpu_load(vcpu);
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->pio.guest_page_offset;
	bytes = vcpu->pio.size * vcpu->pio.cur_count;
	if (vcpu->pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->pio.guest_page_offset;
	vunmap(q);
	kvm_arch_ops->vcpu_load(vcpu);
	free_pio_guest_pages(vcpu);
	return 0;
}

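/*
 * Finish a (possibly partial) PIO transaction: latch a non-string
 * "in" result into rax, copy string data into the guest, decrement
 * rcx by the number of elements transferred (rep), and advance
 * rsi/rdi by cur_count * size bytes (negatively when the direction
 * flag, io->down, is set).
 */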
static int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	long delta;
	int r;

	kvm_arch_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_arch_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->regs[VCPU_REGS_RSI] += delta;
	}

	kvm_arch_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	if (!io->count)
		kvm_arch_ops->skip_emulated_instruction(vcpu);
	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	if (vcpu->pio.in)
		kvm_iodevice_read(pio_dev, vcpu->pio.port,
				  vcpu->pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->pio.port,
				   vcpu->pio.size,
				   pd);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->pio;
	void *pd = vcpu->pio_data;
	int i;

	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
}

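/*
 * Set up a port I/O request.  Returns 1 when the access was handled
 * completely in the kernel and the guest can be resumed, 0 when
 * userspace must complete it via the KVM_EXIT_IO protocol.
 */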
int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int string, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = count;
	vcpu->run->io.port = port;
	vcpu->pio.count = count;
	vcpu->pio.cur_count = count;
	vcpu->pio.size = size;
	vcpu->pio.in = in;
	vcpu->pio.port = port;
	vcpu->pio.string = string;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!string) {
		kvm_arch_ops->cache_regs(vcpu);
		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
		kvm_arch_ops->decache_regs(vcpu);
		if (pio_dev) {
			kernel_pio(pio_dev, vcpu, vcpu->pio_data);
			complete_pio(vcpu);
			return 1;
		}
		return 0;
	}

	if (!count) {
		kvm_arch_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	now = min(count, PAGE_SIZE / size);

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		printk(KERN_ERR "kvm: guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	for (i = 0; i < nr_pages; ++i) {
		spin_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		spin_unlock(&vcpu->kvm->lock);
		if (!page) {
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		printk(KERN_ERR "no string pio read support yet, "
		       "port %x size %d count %ld\n",
		       port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_setup_pio);

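/*
 * KVM_RUN re-entry: before entering the guest, finish whatever the
 * previous userspace exit left pending (outstanding string PIO, an
 * MMIO read result in kvm_run->mmio, or a hypercall return value),
 * and only then hand off to the arch-specific run loop.
 */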
static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	vcpu->cr8 = kvm_run->cr8;

	if (vcpu->pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->mmio_fault_cr2, 0);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			kvm_run->exit_reason = KVM_EXIT_MMIO;
			r = 0;
			goto out;
		}
	}

	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_arch_ops->cache_regs(vcpu);
		vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_arch_ops->decache_regs(vcpu);
	}

	r = kvm_arch_ops->run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

1946}
1947
bccf2150
AK
1948static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
1949 struct kvm_regs *regs)
6aa8b732 1950{
bccf2150 1951 vcpu_load(vcpu);
6aa8b732
AK
1952
1953 kvm_arch_ops->cache_regs(vcpu);
1954
1955 regs->rax = vcpu->regs[VCPU_REGS_RAX];
1956 regs->rbx = vcpu->regs[VCPU_REGS_RBX];
1957 regs->rcx = vcpu->regs[VCPU_REGS_RCX];
1958 regs->rdx = vcpu->regs[VCPU_REGS_RDX];
1959 regs->rsi = vcpu->regs[VCPU_REGS_RSI];
1960 regs->rdi = vcpu->regs[VCPU_REGS_RDI];
1961 regs->rsp = vcpu->regs[VCPU_REGS_RSP];
1962 regs->rbp = vcpu->regs[VCPU_REGS_RBP];
05b3e0c2 1963#ifdef CONFIG_X86_64
6aa8b732
AK
1964 regs->r8 = vcpu->regs[VCPU_REGS_R8];
1965 regs->r9 = vcpu->regs[VCPU_REGS_R9];
1966 regs->r10 = vcpu->regs[VCPU_REGS_R10];
1967 regs->r11 = vcpu->regs[VCPU_REGS_R11];
1968 regs->r12 = vcpu->regs[VCPU_REGS_R12];
1969 regs->r13 = vcpu->regs[VCPU_REGS_R13];
1970 regs->r14 = vcpu->regs[VCPU_REGS_R14];
1971 regs->r15 = vcpu->regs[VCPU_REGS_R15];
1972#endif
1973
1974 regs->rip = vcpu->rip;
1975 regs->rflags = kvm_arch_ops->get_rflags(vcpu);
1976
1977 /*
1978 * Don't leak debug flags in case they were set for guest debugging
1979 */
1980 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
1981 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1982
1983 vcpu_put(vcpu);
1984
1985 return 0;
1986}
1987
static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
				   struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->regs[VCPU_REGS_R8] = regs->r8;
	vcpu->regs[VCPU_REGS_R9] = regs->r9;
	vcpu->regs[VCPU_REGS_R10] = regs->r10;
	vcpu->regs[VCPU_REGS_R11] = regs->r11;
	vcpu->regs[VCPU_REGS_R12] = regs->r12;
	vcpu->regs[VCPU_REGS_R13] = regs->r13;
	vcpu->regs[VCPU_REGS_R14] = regs->r14;
	vcpu->regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->rip = regs->rip;
	kvm_arch_ops->set_rflags(vcpu, regs->rflags);

	kvm_arch_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}

2022static void get_segment(struct kvm_vcpu *vcpu,
2023 struct kvm_segment *var, int seg)
2024{
2025 return kvm_arch_ops->get_segment(vcpu, var, seg);
2026}
2027
bccf2150
AK
2028static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2029 struct kvm_sregs *sregs)
6aa8b732 2030{
6aa8b732
AK
2031 struct descriptor_table dt;
2032
bccf2150 2033 vcpu_load(vcpu);
6aa8b732
AK
2034
2035 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2036 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2037 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2038 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2039 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2040 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2041
2042 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2043 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2044
2045 kvm_arch_ops->get_idt(vcpu, &dt);
2046 sregs->idt.limit = dt.limit;
2047 sregs->idt.base = dt.base;
2048 kvm_arch_ops->get_gdt(vcpu, &dt);
2049 sregs->gdt.limit = dt.limit;
2050 sregs->gdt.base = dt.base;
2051
25c4c276 2052 kvm_arch_ops->decache_cr4_guest_bits(vcpu);
6aa8b732
AK
2053 sregs->cr0 = vcpu->cr0;
2054 sregs->cr2 = vcpu->cr2;
2055 sregs->cr3 = vcpu->cr3;
2056 sregs->cr4 = vcpu->cr4;
2057 sregs->cr8 = vcpu->cr8;
2058 sregs->efer = vcpu->shadow_efer;
2059 sregs->apic_base = vcpu->apic_base;
2060
2061 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2062 sizeof sregs->interrupt_bitmap);
2063
2064 vcpu_put(vcpu);
2065
2066 return 0;
2067}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_arch_ops->set_segment(vcpu, var, seg);
}

static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				    struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_arch_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_arch_ops->set_gdt(vcpu, &dt);

	vcpu->cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
	vcpu->cr3 = sregs->cr3;

	vcpu->cr8 = sregs->cr8;

	mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_arch_ops->set_efer(vcpu, sregs->efer);
#endif
	vcpu->apic_base = sregs->apic_base;

	kvm_arch_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
	kvm_arch_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);

	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
	       sizeof vcpu->irq_pending);
	vcpu->irq_summary = 0;
	for (i = 0; i < NR_IRQ_WORDS; ++i)
		if (vcpu->irq_pending[i])
			__set_bit(i, &vcpu->irq_summary);

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static __init void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
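
/*
 * The loop above compacts msrs_to_save in place: any MSR whose
 * rdmsr_safe() probe faults on this host is dropped and the survivors
 * slide down.  E.g., on a hypothetical host without MSR_CSTAR:
 *
 *	before: { SYSENTER_CS, CSTAR, TSC }	num_msrs_to_save = 3
 *	after:  { SYSENTER_CS, TSC, TSC }	num_msrs_to_save = 2
 *
 * The stale tail entry is never read, since all users stop at
 * num_msrs_to_save.
 */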

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}
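
/*
 * Illustrative sketch, not part of this file: reading one MSR from
 * userspace through the path above.  vcpu_fd is hypothetical, and the
 * variable-length kvm_msrs buffer is built by hand here.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entry;
 *	} req = { .hdr.nmsrs = 1, .entry.index = MSR_IA32_SYSENTER_CS };
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *
 * n == 1 on success; req.entry.data then holds the value.
 */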

/*
 * Translate a guest virtual address to a guest physical address.
 */
static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				    struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	spin_lock(&vcpu->kvm->lock);
	gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	spin_unlock(&vcpu->kvm->lock);
	vcpu_put(vcpu);

	return 0;
}
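
/*
 * Illustrative sketch, not part of this file (vcpu_fd hypothetical):
 *
 *	struct kvm_translation tr = { .linear_address = gva };
 *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
 *	if (tr.valid)
 *		use(tr.physical_address);
 */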

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);

	vcpu_put(vcpu);

	return 0;
}
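
/*
 * irq_pending is a 256-bit bitmap of queued vectors and irq_summary has
 * one bit per irq_pending word, so the injection path can locate a
 * pending vector with two short scans instead of walking all 256 bits.
 */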

static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				      struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_arch_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}

static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
				    unsigned long address,
				    int *type)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (pgoff == 0)
		page = virt_to_page(vcpu->run);
	else if (pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->pio_data);
	else
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
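
/*
 * The vcpu mmap layout implied above: page 0 is the kvm_run structure
 * and page KVM_PIO_PAGE_OFFSET is the PIO data page.  This is why
 * KVM_GET_VCPU_MMAP_SIZE below reports 2 * PAGE_SIZE.
 */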

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	fput(vcpu->kvm->filp);
	return 0;
}

static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	int fd, r;
	struct inode *inode;
	struct file *file;

	r = anon_inode_getfd(&fd, &inode, &file,
			     "kvm-vcpu", &kvm_vcpu_fops, vcpu);
	if (r)
		return r;
	atomic_inc(&vcpu->kvm->filp->f_count);
	return fd;
}
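
/*
 * Lifetime note: create_vcpu_fd() takes an extra reference on the VM's
 * struct file, and kvm_vcpu_release() above drops it with fput(), so a
 * VM cannot be destroyed while one of its vcpu fds is still open.
 */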

/*
 * Creates some virtual cpus. Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
	int r;
	struct kvm_vcpu *vcpu;
	struct page *page;

	r = -EINVAL;
	if (!valid_vcpu(n))
		goto out;

	vcpu = &kvm->vcpus[n];
	vcpu->vcpu_id = n;

	mutex_lock(&vcpu->mutex);

	if (vcpu->vmcs) {
		mutex_unlock(&vcpu->mutex);
		return -EEXIST;
	}

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	r = -ENOMEM;
	if (!page)
		goto out_unlock;
	vcpu->run = page_address(page);

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	r = -ENOMEM;
	if (!page)
		goto out_free_run;
	vcpu->pio_data = page_address(page);

	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
					   FX_IMAGE_ALIGN);
	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
	vcpu->cr0 = 0x10;

	r = kvm_arch_ops->vcpu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	kvm_arch_ops->vcpu_load(vcpu);
	r = kvm_mmu_setup(vcpu);
	if (r >= 0)
		r = kvm_arch_ops->vcpu_setup(vcpu);
	vcpu_put(vcpu);

	if (r < 0)
		goto out_free_vcpus;

	r = create_vcpu_fd(vcpu);
	if (r < 0)
		goto out_free_vcpus;

	spin_lock(&kvm_lock);
	if (n >= kvm->nvcpus)
		kvm->nvcpus = n + 1;
	spin_unlock(&kvm_lock);

	return r;

out_free_vcpus:
	kvm_free_vcpu(vcpu);
out_free_run:
	free_page((unsigned long)vcpu->run);
	vcpu->run = NULL;
out_unlock:
	mutex_unlock(&vcpu->mutex);
out:
	return r;
}
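
/*
 * Illustrative sketch, not part of this file (vm_fd hypothetical):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 * The returned fd accepts the per-vcpu ioctls (KVM_RUN, KVM_GET_REGS,
 * ...) dispatched by kvm_vcpu_ioctl() below.
 */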

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	u64 efer;
	int i;
	struct kvm_cpuid_entry *e, *entry;

	rdmsrl(MSR_EFER, efer);
	entry = NULL;
	for (i = 0; i < vcpu->cpuid_nent; ++i) {
		e = &vcpu->cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}
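
/*
 * Bit 20 of CPUID.80000001H:EDX is the NX (no-execute) feature flag; if
 * the host runs with EFER.NX clear, the flag is masked out of the
 * guest's cpuid so the guest will not try to enable EFER.NX itself.
 */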

static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out;
	vcpu->cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
	if (sigset) {
		sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
		vcpu->sigset_active = 1;
		vcpu->sigset = *sigset;
	} else
		vcpu->sigset_active = 0;
	return 0;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};

static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)vcpu->guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}
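
/*
 * The struct above mirrors the leading fields of the 512-byte memory
 * image written by the fxsave instruction, which is what guest_fx_image
 * holds; the two ioctls just transcribe between that image and the flat
 * kvm_fpu ABI structure (e.g. the 128-byte st_space <-> fpu->fpr copy).
 */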

static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs kvm_regs;

		memset(&kvm_regs, 0, sizeof kvm_regs);
		r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs kvm_regs;

		r = -EFAULT;
		if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
			goto out;
		r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SREGS: {
		struct kvm_sregs kvm_sregs;

		memset(&kvm_sregs, 0, sizeof kvm_sregs);
		r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		struct kvm_sregs kvm_sregs;

		r = -EFAULT;
		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
			goto out;
		r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_DEBUG_GUEST: {
		struct kvm_debug_guest dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		/* pass p, not &sigset: p stays NULL when no mask was given,
		 * which clears sigset_active instead of installing garbage */
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		struct kvm_fpu fpu;

		memset(&fpu, 0, sizeof fpu);
		r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &fpu, sizeof fpu))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		struct kvm_fpu fpu;

		r = -EFAULT;
		if (copy_from_user(&fpu, argp, sizeof fpu))
			goto out;
		r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address,
				  int *type)
{
	struct kvm *kvm = vma->vm_file->private_data;
	unsigned long pgoff;
	struct page *page;

	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	page = gfn_to_page(kvm, pgoff);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);
	if (type != NULL)
		*type = VM_FAULT_MINOR;

	return page;
}

static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};

static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}

static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};

static int kvm_dev_ioctl_create_vm(void)
{
	int fd, r;
	struct inode *inode;
	struct file *file;
	struct kvm *kvm;

	kvm = kvm_create_vm();
	if (IS_ERR(kvm))
		return PTR_ERR(kvm);
	r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
	if (r) {
		kvm_destroy_vm(kvm);
		return r;
	}

	kvm->filp = file;

	return fd;
}
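
/*
 * Illustrative sketch, not part of this file: how userspace reaches the
 * anon-inode path above (fd names hypothetical).
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *
 * vm_fd then accepts the kvm_vm_ioctl() commands above, e.g.
 * KVM_SET_MEMORY_REGION.
 */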

static long kvm_dev_ioctl(struct file *filp,
			  unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_GET_API_VERSION:
		r = -EINVAL;
		if (arg)
			goto out;
		r = KVM_API_VERSION;
		break;
	case KVM_CREATE_VM:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_dev_ioctl_create_vm();
		break;
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		/* indices is a u32 array: offset in elements, not bytes */
		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_CHECK_EXTENSION:
		/*
		 * No extensions defined at present.
		 */
		r = 0;
		break;
	case KVM_GET_VCPU_MMAP_SIZE:
		r = -EINVAL;
		if (arg)
			goto out;
		r = 2 * PAGE_SIZE;
		break;
	default:
		;
	}
out:
	return r;
}
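
/*
 * Illustrative sketch, not part of this file: KVM_GET_MSR_INDEX_LIST is
 * a two-call protocol.  Probe with nmsrs = 0 to learn the count (the
 * updated header is copied back even though the call fails with E2BIG),
 * then call again with a buffer of that size.  kvm_fd is hypothetical.
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);
 *	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
 */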

static struct file_operations kvm_chardev_ops = {
	.open = kvm_dev_open,
	.release = kvm_dev_release,
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl = kvm_dev_ioctl,
};

static struct miscdevice kvm_dev = {
	KVM_MINOR,
	"kvm",
	&kvm_chardev_ops,
};

static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	if (val == SYS_RESTART) {
		/*
		 * Some (well, at least mine) BIOSes hang on reboot if
		 * in vmx root mode.
		 */
		printk(KERN_INFO "kvm: exiting hardware virtualization\n");
		on_each_cpu(hardware_disable, NULL, 0, 1);
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
2945
774c47f1
AK
2946/*
2947 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
2948 * cached on it.
2949 */
2950static void decache_vcpus_on_cpu(int cpu)
2951{
2952 struct kvm *vm;
2953 struct kvm_vcpu *vcpu;
2954 int i;
2955
2956 spin_lock(&kvm_lock);
2957 list_for_each_entry(vm, &vm_list, vm_list)
2958 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2959 vcpu = &vm->vcpus[i];
2960 /*
2961 * If the vcpu is locked, then it is running on some
2962 * other cpu and therefore it is not cached on the
2963 * cpu in question.
2964 *
2965 * If it's not locked, check the last cpu it executed
2966 * on.
2967 */
2968 if (mutex_trylock(&vcpu->mutex)) {
2969 if (vcpu->cpu == cpu) {
2970 kvm_arch_ops->vcpu_decache(vcpu);
2971 vcpu->cpu = -1;
2972 }
2973 mutex_unlock(&vcpu->mutex);
2974 }
2975 }
2976 spin_unlock(&kvm_lock);
2977}

static void hardware_enable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_set(cpu, cpus_hardware_enabled);
	kvm_arch_ops->hardware_enable(NULL);
}

static void hardware_disable(void *junk)
{
	int cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, cpus_hardware_enabled))
		return;
	cpu_clear(cpu, cpus_hardware_enabled);
	decache_vcpus_on_cpu(cpu);
	kvm_arch_ops->hardware_disable(NULL);
}
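
/*
 * The cpus_hardware_enabled mask makes the two helpers above idempotent,
 * so the reboot, suspend/resume and hotplug paths can call them without
 * tracking which cpus already have virtualization turned on.
 */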

static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
			   void *v)
{
	int cpu = (long)v;

	switch (val) {
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		hardware_disable(NULL);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
		       cpu);
		smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
		break;
	}
	return NOTIFY_OK;
}

void kvm_io_bus_init(struct kvm_io_bus *bus)
{
	memset(bus, 0, sizeof(*bus));
}

void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		kvm_iodevice_destructor(pos);
	}
}

struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
{
	int i;

	for (i = 0; i < bus->dev_count; i++) {
		struct kvm_io_device *pos = bus->devs[i];

		if (pos->in_range(pos, addr))
			return pos;
	}

	return NULL;
}

void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
{
	BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));

	bus->devs[bus->dev_count++] = dev;
}
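
/*
 * A minimal sketch of a device sitting on this bus, assuming the
 * kvm_io_device callbacks of this era (in_range as used above, plus
 * read/write); my_dev, my_in_range and friends are hypothetical.
 *
 *	static struct kvm_io_device my_dev = {
 *		.in_range = my_in_range,
 *		.read = my_read,
 *		.write = my_write,
 *	};
 *	kvm_io_bus_register_dev(&kvm->mmio_bus, &my_dev);
 *
 * The emulator then routes guest accesses with kvm_io_bus_find_dev().
 */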

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};

static u64 stat_get(void *_offset)
{
	unsigned offset = (long)_offset;
	u64 total = 0;
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = &kvm->vcpus[i];
			total += *(u32 *)((void *)vcpu + offset);
		}
	spin_unlock(&kvm_lock);
	return total;
}

static void stat_set(void *offset, u64 val)
{
}

DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, stat_set, "%llu\n");

static __init void kvm_init_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	debugfs_dir = debugfs_create_dir("kvm", NULL);
	for (p = debugfs_entries; p->name; ++p)
		p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
						(void *)(long)p->offset,
						&stat_fops);
}

static void kvm_exit_debug(void)
{
	struct kvm_stats_debugfs_item *p;

	for (p = debugfs_entries; p->name; ++p)
		debugfs_remove(p->dentry);
	debugfs_remove(debugfs_dir);
}
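
/*
 * Each debugfs file reports one counter summed over all vcpus of all
 * VMs (see stat_get() above).  E.g., with debugfs mounted at
 * /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/kvm/exits
 *
 * prints the total exit count in the "%llu" format set up above.
 */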

static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}

static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}

static struct sysdev_class kvm_sysdev_class = {
	set_kset_name("kvm"),
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};

hpa_t bad_page_address;

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
{
	int r;

	if (kvm_arch_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_arch_ops = ops;

	r = kvm_arch_ops->hardware_setup();
	if (r < 0)
		goto out;

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	return r;

out_free:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
out:
	kvm_arch_ops = NULL;
	return r;
}
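
/*
 * kvm_init_arch() is the entry point used by the hardware-specific
 * modules (vmx.c for Intel VT-x, svm.c for AMD SVM): each passes in its
 * own struct kvm_arch_ops, and only one of them can be loaded at a
 * time, as the -EEXIST check above enforces.
 */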

void kvm_exit_arch(void)
{
	misc_deregister(&kvm_dev);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_arch_ops->hardware_unsetup();
	kvm_arch_ops = NULL;
}

static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}

static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	kvm_mmu_module_exit();
}

module_init(kvm_init)
module_exit(kvm_exit)

EXPORT_SYMBOL_GPL(kvm_init_arch);
EXPORT_SYMBOL_GPL(kvm_exit_arch);