/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "segment_descriptor.h"
#include "irq.h"
#include "mmu.h"

#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_x86_ops *kvm_x86_ops;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

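/*
 * Compute the linear base address of a host segment by walking the
 * host GDT (or the LDT, for selectors with the TI bit set).  On 64-bit
 * hosts the upper base bits of system descriptors are folded in too.
 */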
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((unsigned long)d->base_mid << 16) |
		((unsigned long)d->base_high << 24);
#ifdef CONFIG_X86_64
	if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long) \
			((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
		printk(KERN_DEBUG "kvm: inject_page_fault:"
		       " double fault 0x%lx\n", addr);
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	down_read(&current->mm->mmap_sem);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
	up_read(&current->mm->mmap_sem);

	return ret;
}

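/*
 * Re-read the guest's PDPTEs and compare them with the cached copy to
 * detect whether they changed behind our back (set_cr3() uses this to
 * decide whether a TLB flush alone is sufficient).
 */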
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	down_read(&current->mm->mmap_sem);
	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	up_read(&current->mm->mmap_sem);

	return changed;
}

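/*
 * Emulate a guest write to CR0.  Reserved bits and inconsistent
 * combinations (NW without CD, PG without PE, enabling paging in long
 * mode without PAE or with CS.L set) raise #GP; otherwise the new
 * value is propagated to the hardware and the MMU context is rebuilt.
 */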
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);

void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(lmsw);

void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(set_cr4);

void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	down_read(&current->mm->mmap_sem);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
	up_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(set_cr3);

void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(set_cr8);

unsigned long get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

#ifdef CONFIG_X86_64

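/*
 * Emulate a guest write to EFER.  Reserved bits raise #GP, and LME may
 * not be toggled while paging is enabled; LMA is preserved from the
 * current shadow value since it is controlled by the hardware.
 */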
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & EFER_RESERVED_BITS) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

#endif

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}


int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
#endif
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __FUNCTION__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

/*
 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
 * cached on it.
 */
void decache_vcpus_on_cpu(int cpu)
{
	struct kvm *vm;
	struct kvm_vcpu *vcpu;
	int i;

	spin_lock(&kvm_lock);
	list_for_each_entry(vm, &vm_list, vm_list)
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = vm->vcpus[i];
			if (!vcpu)
				continue;
			/*
			 * If the vcpu is locked, then it is running on some
			 * other cpu and therefore it is not cached on the
			 * cpu in question.
			 *
			 * If it's not locked, check the last cpu it executed
			 * on.
			 */
			if (mutex_trylock(&vcpu->mutex)) {
				if (vcpu->cpu == cpu) {
					kvm_x86_ops->vcpu_decache(vcpu);
					vcpu->cpu = -1;
				}
				mutex_unlock(&vcpu->mutex);
			}
		}
	spin_unlock(&kvm_lock);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
		r = 1;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

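/*
 * If the host has NX disabled in EFER, hide the NX capability from the
 * guest's 0x80000001 cpuid leaf as well, since we cannot provide it.
 */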
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

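/*
 * Fill in one (or, for stateful and indexed leaves, several) cpuid
 * entries for 'function', masking the feature bits down to what KVM
 * can actually virtualize for the guest.
 */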
static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int index, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (index = 1; *nent < maxnent; ++index) {
			cache_type = entry[index - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[index], function, index);
			entry[index].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int index, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (index = 1; *nent < maxnent; ++index) {
			level_type = entry[index - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[index], function, index);
			entry[index].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
					    struct kvm_cpuid2 *cpuid,
					    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&current->mm->mmap_sem);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

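/*
 * Translate a guest frame number through the memory alias table; gfns
 * that fall outside every alias region map to themselves.
 */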
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&current->mm->mmap_sem);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	kvm_mmu_zap_all(kvm);

	up_write(&current->mm->mmap_sem);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[0],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
		       &pic_irqchip(kvm)->pics[1],
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
		       ioapic_irqchip(kvm),
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
		       &chip->chip.pic,
		       sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
		       &chip->chip.ioapic,
		       sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&current->mm->mmap_sem);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&current->mm->mmap_sem);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
					irq_event.irq,
					irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
						     cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

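/*
 * Probe each MSR in msrs_to_save with rdmsr_safe() and compact the
 * list in place so that only MSRs the host actually implements are
 * reported to userspace.
 */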
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the apic needs an MMIO device hook, so take a shortcut here.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

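/*
 * Read guest-virtual memory for the emulator, translating each page
 * through the guest page tables and propagating a fault if the
 * translation fails.
 */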
int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	down_read(&current->mm->mmap_sem);
	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	up_read(&current->mm->mmap_sem);
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	down_read(&current->mm->mmap_sem);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
	up_read(&current->mm->mmap_sem);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			       const void *val, int bytes)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0) {
		up_read(&current->mm->mmap_sem);
		return 0;
	}
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	up_read(&current->mm->mmap_sem);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	down_read(&current->mm->mmap_sem);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
	up_read(&current->mm->mmap_sem);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* a guest's cmpxchg8b has to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;	/* renamed: must not shadow the 'addr' parameter */
		u64 val;

		down_read(&current->mm->mmap_sem);
		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;
		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
emul_write:
		up_read(&current->mm->mmap_sem);
	}
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	static int reported;
	u8 opcodes[4];
	unsigned long rip = vcpu->arch.rip;
	unsigned long rip_linear;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	if (reported)
		return;

	emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
	reported = 1;
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

struct x86_emulate_ops emulate_ops = {
	.read_std            = emulator_read_std,
	.read_emulated       = emulator_read_emulated,
	.write_emulated      = emulator_write_emulated,
	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
};

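/*
 * Decode and run one guest instruction under the x86 emulator, setting
 * up the emulation context (mode, segment bases) from the current vcpu
 * state and converting the result into an EMULATE_* return code.
 */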
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->arch.mmio_fault_cr2 = cr2;
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->arch.emulate_ctxt.cs_base = 0;
			vcpu->arch.emulate_ctxt.ds_base = 0;
			vcpu->arch.emulate_ctxt.es_base = 0;
			vcpu->arch.emulate_ctxt.ss_base = 0;
		} else {
			vcpu->arch.emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->arch.emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->arch.emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->arch.emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		vcpu->arch.emulate_ctxt.gs_base =
				get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->arch.emulate_ctxt.fs_base =
				get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
		if (vcpu->arch.pio.guest_pages[i]) {
			kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
			vcpu->arch.pio.guest_pages[i] = NULL;
		}
}

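/*
 * Copy string-PIO data between the pio_data scratch page and the guest
 * pages pinned for the operation, mapping the guest pages contiguously
 * with vmap() for the duration of the copy.
 */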
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->arch.pio_data;
	void *q;
	unsigned bytes;
	int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;

	q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
		 PAGE_KERNEL);
	if (!q) {
		free_pio_guest_pages(vcpu);
		return -ENOMEM;
	}
	q += vcpu->arch.pio.guest_page_offset;
	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
	if (vcpu->arch.pio.in)
		memcpy(q, p, bytes);
	else
		memcpy(p, q, bytes);
	q -= vcpu->arch.pio.guest_page_offset;
	vunmap(q);
	free_pio_guest_pages(vcpu);
	return 0;
}

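/*
 * Finish a (possibly partial) PIO operation after userspace or an
 * in-kernel device has handled it: store the result into RAX for
 * simple ops, or advance RSI/RDI/RCX for string ops.
 */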
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	long delta;
	int r;

	kvm_x86_ops->cache_regs(vcpu);

	if (!io->string) {
		if (io->in)
			memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
			       io->size);
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r) {
				kvm_x86_ops->cache_regs(vcpu);
				return r;
			}
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in)
			vcpu->arch.regs[VCPU_REGS_RDI] += delta;
		else
			vcpu->arch.regs[VCPU_REGS_RSI] += delta;
	}

	kvm_x86_ops->decache_regs(vcpu);

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}

2014static void kernel_pio(struct kvm_io_device *pio_dev,
2015 struct kvm_vcpu *vcpu,
2016 void *pd)
2017{
2018 /* TODO: String I/O for in kernel device */
2019
2020 mutex_lock(&vcpu->kvm->lock);
ad312c7c
ZX
2021 if (vcpu->arch.pio.in)
2022 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2023 vcpu->arch.pio.size,
de7d789a
CO
2024 pd);
2025 else
ad312c7c
ZX
2026 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2027 vcpu->arch.pio.size,
de7d789a
CO
2028 pd);
2029 mutex_unlock(&vcpu->kvm->lock);
2030}
2031
2032static void pio_string_write(struct kvm_io_device *pio_dev,
2033 struct kvm_vcpu *vcpu)
2034{
ad312c7c
ZX
2035 struct kvm_pio_request *io = &vcpu->arch.pio;
2036 void *pd = vcpu->arch.pio_data;
de7d789a
CO
2037 int i;
2038
2039 mutex_lock(&vcpu->kvm->lock);
2040 for (i = 0; i < io->cur_count; i++) {
2041 kvm_iodevice_write(pio_dev, io->port,
2042 io->size,
2043 pd);
2044 pd += io->size;
2045 }
2046 mutex_unlock(&vcpu->kvm->lock);
2047}
2048
2049static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2050 gpa_t addr)
2051{
2052 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
2053}
2054
2055int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2056 int size, unsigned port)
2057{
2058 struct kvm_io_device *pio_dev;
2059
2060 vcpu->run->exit_reason = KVM_EXIT_IO;
2061 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2062 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2063 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
ad312c7c
ZX
2064 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2065 vcpu->run->io.port = vcpu->arch.pio.port = port;
2066 vcpu->arch.pio.in = in;
2067 vcpu->arch.pio.string = 0;
2068 vcpu->arch.pio.down = 0;
2069 vcpu->arch.pio.guest_page_offset = 0;
2070 vcpu->arch.pio.rep = 0;
de7d789a
CO
2071
2072 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2073 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
de7d789a
CO
2074 kvm_x86_ops->decache_regs(vcpu);
2075
2076 kvm_x86_ops->skip_emulated_instruction(vcpu);
2077
2078 pio_dev = vcpu_find_pio_dev(vcpu, port);
2079 if (pio_dev) {
ad312c7c 2080 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
de7d789a
CO
2081 complete_pio(vcpu);
2082 return 1;
2083 }
2084 return 0;
2085}
2086EXPORT_SYMBOL_GPL(kvm_emulate_pio);
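
/*
 * String PIO ("ins"/"outs").  At most one page worth of elements is
 * emulated per exit; a transfer that straddles a page boundary is cut
 * down to a single element with both pages pinned, so that each element
 * is copied atomically.  Reverse string I/O (EFLAGS.DF set) is not
 * implemented and currently injects #GP into the guest instead.
 */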
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 1;
	vcpu->arch.pio.down = down;
	vcpu->arch.pio.guest_page_offset = offset_in_page(address);
	vcpu->arch.pio.rep = rep;

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary.  Pin two guest pages
		 * so that we satisfy atomicity constraints.  Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->arch.pio.cur_count = now;

	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	for (i = 0; i < nr_pages; ++i) {
		down_read(&current->mm->mmap_sem);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		vcpu->arch.pio.guest_pages[i] = page;
		up_read(&current->mm->mmap_sem);
		if (!page) {
			kvm_inject_gp(vcpu, 0);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->arch.pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->arch.pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			       "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
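
/*
 * Module init/exit.  kvm_arch_init() is invoked by a vendor module
 * (kvm-intel or kvm-amd) passing its kvm_x86_ops; only one vendor module
 * can be registered at a time, and registration backs out cleanly when
 * hardware virtualization is unsupported or disabled by the BIOS.
 */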
int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	r = kvm_mmu_module_init();
	if (r)
		goto out_fail;

	kvm_init_msr_list();

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	return 0;

out:
	kvm_mmu_module_exit();
out_fail:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
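
/*
 * Hypercall emulation.  The guest-side convention mirrors the register
 * usage below: number in RAX, arguments in RBX, RCX, RDX and RSI, return
 * value back in RAX.  An illustrative guest stub (not part of this file)
 * would be:
 *
 *	mov	$nr, %rax
 *	vmcall			# vmmcall on AMD, cf. kvm_fix_hypercall()
 *
 * Unknown hypercall numbers fail with -KVM_ENOSYS.
 */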
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->arch.regs[VCPU_REGS_RAX];
	a0 = vcpu->arch.regs[VCPU_REGS_RBX];
	a1 = vcpu->arch.regs[VCPU_REGS_RCX];
	a2 = vcpu->arch.regs[VCPU_REGS_RDX];
	a3 = vcpu->arch.regs[VCPU_REGS_RSI];

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	vcpu->arch.regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
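
/*
 * A guest that executes the other vendor's hypercall instruction (e.g.
 * vmcall on an AMD host) faults back into KVM, which then calls this
 * fixup to rewrite the instruction at the guest's RIP with the native
 * one emitted by kvm_x86_ops->patch_hypercall().
 */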
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active
	 * mapping, so that the updated hypercall appears atomically across
	 * all VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->arch.cr0;
	case 2:
		return vcpu->arch.cr2;
	case 3:
		return vcpu->arch.cr3;
	case 4:
		return vcpu->arch.cr4;
	case 8:
		return get_cr8(vcpu);
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
		break;
	case 8:
		set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];

		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/*
 * Find an entry with matching function, matching index (if needed), and
 * that should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	int i;
	u32 function, index;
	struct kvm_cpuid_entry2 *e, *best;

	kvm_x86_ops->cache_regs(vcpu);
	function = vcpu->arch.regs[VCPU_REGS_RAX];
	index = vcpu->arch.regs[VCPU_REGS_RCX];
	vcpu->arch.regs[VCPU_REGS_RAX] = 0;
	vcpu->arch.regs[VCPU_REGS_RBX] = 0;
	vcpu->arch.regs[VCPU_REGS_RCX] = 0;
	vcpu->arch.regs[VCPU_REGS_RDX] = 0;
	best = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	if (best) {
		vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
		vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
		vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
		vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
	}
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->arch.irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->arch.interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
					(vcpu->arch.interrupt_window_open &&
					 vcpu->arch.irq_summary == 0);
}

static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	vcpu->arch.apic->vapic_page = page;
	up_read(&current->mm->mmap_sem);
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic || !apic->vapic_addr)
		return;

	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
}
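
/*
 * The core vcpu loop: reload the MMU, inject any pending exception or
 * interrupt, enter the guest with interrupts disabled, and afterwards let
 * kvm_x86_ops->handle_exit() decide whether the exit can be handled in
 * the kernel (loop again) or has to be reported to userspace (return).
 */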
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_x86_ops->vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
	}

	vapic_enter(vcpu);

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}

	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (vcpu->arch.exception.pending)
		__queue_exception(vcpu);
	else if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	kvm_lapic_sync_to_vapic(vcpu);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
	}

	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
		vcpu->arch.exception.pending = false;

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		if (!need_resched())
			goto again;
	}

out:
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	vapic_exit(vcpu);

	return r;
}
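
/*
 * Backend of the KVM_RUN ioctl.  A minimal userspace driver loop
 * (illustrative only) looks roughly like:
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_IO:   ... emulate the port access ...
 *		case KVM_EXIT_MMIO: ... emulate the memory access ...
 *		case KVM_EXIT_HLT:  ...
 *		}
 *	}
 *
 * On re-entry any pending PIO or MMIO completion is finished first, which
 * is why complete_pio() and the mmio replay below run before __vcpu_run().
 */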
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		vcpu_put(vcpu);
		return -EAGAIN;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#ifdef CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->arch.mmio_fault_cr2, 0, 1);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
		kvm_x86_ops->cache_regs(vcpu);
		vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
		kvm_x86_ops->decache_regs(vcpu);
	}

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_x86_ops->cache_regs(vcpu);

	regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
	regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
	regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
	regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
	regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
	regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
	regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
	regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
#ifdef CONFIG_X86_64
	regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
	regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
	regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
	regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
	regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
	regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
	regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
	regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
#endif

	regs->rip = vcpu->arch.rip;
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
	vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
	vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
	vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
	vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
	vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
	vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
	vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
#ifdef CONFIG_X86_64
	vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
	vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
	vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
	vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
	vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
	vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
	vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
	vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
#endif

	vcpu->arch.rip = regs->rip;
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	kvm_x86_ops->decache_regs(vcpu);

	vcpu_put(vcpu);

	return 0;
}

static void get_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

static void set_segment(struct kvm_vcpu *vcpu,
			struct kvm_segment *var, int seg)
{
	return kvm_x86_ops->set_segment(vcpu, var, seg);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int i, pending_vec, max_bits;
	struct descriptor_table dt;

	vcpu_load(vcpu);

	dt.limit = sregs->idt.limit;
	dt.base = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.limit = sregs->gdt.limit;
	dt.base = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
#ifdef CONFIG_X86_64
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
#endif
	kvm_set_apic_base(vcpu, sregs->apic_base);

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);

	mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
	vcpu->arch.cr0 = sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);

	mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (!is_long_mode(vcpu) && is_pae(vcpu))
		load_pdptrs(vcpu, vcpu->arch.cr3);

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	if (!irqchip_in_kernel(vcpu->kvm)) {
		memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
		       sizeof vcpu->arch.irq_pending);
		vcpu->arch.irq_summary = 0;
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
			if (vcpu->arch.irq_pending[i])
				__set_bit(i, &vcpu->arch.irq_summary);
	} else {
		max_bits = (sizeof sregs->interrupt_bitmap) << 3;
		pending_vec = find_first_bit(
			(const unsigned long *)sregs->interrupt_bitmap,
			max_bits);
		/* Only pending external irq is handled here */
		if (pending_vec < max_bits) {
			kvm_x86_ops->set_irq(vcpu, pending_vec);
			pr_debug("Set back pending irq %d\n", pending_vec);
		}
	}

	set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	int r;

	vcpu_load(vcpu);

	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);

	vcpu_put(vcpu);

	return r;
}

/*
 * fxsave fpu state.  Taken from x86_64/processor.h.  To be killed when
 * we have asm/x86/processor.h
 */
struct fxsave {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
#ifdef CONFIG_X86_64
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
#else
	u32	xmm_space[32];	/* 8*16 bytes for each XMM-reg = 128 bytes */
#endif
};
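
/*
 * This mirrors the hardware FXSAVE/FXRSTOR image, and like the hardware
 * area it must be 16-byte aligned (see the BUG_ON() in
 * kvm_arch_vcpu_setup()).
 */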

/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;

	vcpu_load(vcpu);
	down_read(&current->mm->mmap_sem);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
	up_read(&current->mm->mmap_sem);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;
	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;

	vcpu_load(vcpu);

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	vcpu_put(vcpu);

	return 0;
}

void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->arch.host_fx_image);
	fpu_init();
	fx_save(&vcpu->arch.guest_fx_image);
	fx_restore(&vcpu->arch.host_fx_image);
	preempt_enable();

	vcpu->arch.cr0 |= X86_CR0_ET;
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
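
/*
 * Lazy FPU switching: the guest FPU image is loaded only while
 * vcpu->fpu_active indicates the guest actually uses the FPU, with the
 * host image saved around it; kvm_put_guest_fpu() undoes the switch and
 * bumps the fpu_reload statistic.
 */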
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 1;
	fx_save(&vcpu->arch.host_fx_image);
	fx_restore(&vcpu->arch.guest_fx_image);
}
EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fx_save(&vcpu->arch.guest_fx_image);
	fx_restore(&vcpu->arch.host_fx_image);
	++vcpu->stat.fpu_reload;
}
EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	/* We do fxsave: this must be aligned. */
	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);

	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->vcpu_reset(vcpu);
}

void kvm_arch_hardware_enable(void *garbage)
{
	kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	return 0;

fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_free_lapic(vcpu);
	kvm_mmu_destroy(vcpu);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	/*
	 * Unpin any mmu pages first.
	 */
	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm_unload_vcpu_mmu(kvm->vcpus[i]);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];

	/*
	 * To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			memslot->userspace_addr = do_mmap(NULL, 0,
						npages * PAGE_SIZE,
						PROT_READ | PROT_WRITE,
						MAP_SHARED | MAP_ANONYMOUS,
						0);

			if (IS_ERR((void *)memslot->userspace_addr))
				return PTR_ERR((void *)memslot->userspace_addr);
		} else {
			if (!old.user_alloc && old.rmap) {
				int ret;

				ret = do_munmap(current->mm, old.userspace_addr,
						old.npages * PAGE_SIZE);
				if (ret < 0)
					printk(KERN_WARNING
					       "kvm_vm_ioctl_set_memory_region: "
					       "failed to munmap memory\n");
			}
		}
	}

	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
}
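
/*
 * Kick a vcpu out of whatever it is doing: wake it if it is blocked in
 * kvm_vcpu_block(), and if it is currently executing guest code, send an
 * IPI to its physical CPU so it takes a VM exit.  The IPI handler below
 * has nothing to do; the exit itself is the point.
 */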
static void vcpu_kick_intr(void *info)
{
#ifdef DEBUG
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
	printk(KERN_DEBUG "vcpu_kick_intr %p \n", vcpu);
#endif
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int ipi_pcpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
	if (vcpu->guest_mode)
		smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
}