KVM: IOAPIC/LAPIC: Enable NMI support
[linux-2.6-block.git] / arch / x86 / kvm / x86.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"

#include <linux/clocksource.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall per default because it is emulated by KVM
 * - enable LME and LMA per default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

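/*
 * Example (illustrative values): a 32-bit descriptor scatters its base
 * across three fields, so for base0 = 0x5678, base1 = 0x34 and
 * base2 = 0x12 the function below reassembles the base 0x12345678; on
 * x86-64, system descriptors (d->s == 0) carry bits 63:32 in a fourth
 * field, base3.
 */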
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {           /* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

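/*
 * Fault promotion below mirrors the hardware rules: a #PF injected
 * while another #PF is pending becomes a #DF, and a #PF on top of a
 * pending #DF escalates to a triple fault, which shuts the guest down.
 */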
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
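/*
 * Worked example of the offset math below (illustrative cr3): in PAE
 * mode CR3 is 32-byte aligned, so for cr3 = 0x12345060 the four PDPTEs
 * live at byte 0x60 of page 0x12345:
 * offset = ((0x060 >> 5) << 2) = 12 u64 entries, and 12 * 8 = 0x60.
 */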
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;

			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS,
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
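/*
 * The two version bumps above form a seqlock: an odd version means an
 * update is in flight.  A guest-side reader would loop along these
 * lines (illustrative sketch; the real code lives in the guest's
 * pvclock implementation, not in this file):
 *
 *	do {
 *		v = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((v & 1) || v != wc->version);
 */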

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}
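/*
 * Example: div_frac(3, 4) computes (3ULL << 32) / 4 = 0xc0000000,
 * i.e. 0.75 as a 32-bit binary fraction.
 */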

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __FUNCTION__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
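/*
 * Worked example (illustrative tsc_khz): for tsc_khz = 2000000 (a
 * 2 GHz TSC), tps64 = 2e9 = nsecs * 2, so neither loop runs and
 * tsc_shift stays 0; tsc_to_system_mul = div_frac(1e9, 2e9) =
 * 0x80000000, i.e. 0.5 -- the guest multiplies TSC deltas by 0.5 in
 * 32.32 fixed point to convert them to nanoseconds.
 */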

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if ((!vcpu->time_page))
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
		kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = tsc_khz;
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished.  Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}


int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			  __func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case 0x200 ... 0x2ff: /* MTRRs */
		break;
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		down_read(&current->mm->mmap_sem);
		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
		up_read(&current->mm->mmap_sem);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
		/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_CLOCKSOURCE:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
			cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY);

	/* all func 2 cpuid_count() should be called on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		struct kvm_lapic_state lapic;

		memset(&lapic, 0, sizeof lapic);
		r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &lapic, sizeof lapic))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		struct kvm_lapic_state lapic;

		r = -EFAULT;
		if (copy_from_user(&lapic, argp, sizeof lapic))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
				cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
				cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
			ioapic_irqchip(kvm),
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
			&chip->chip.ioapic,
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
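/*
 * If anything was dirty, write access to the whole slot is removed so
 * that the next guest write to a logged page faults and re-marks it,
 * and remote TLBs are flushed so running vcpus observe the new write
 * protection.
 */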
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_TSS_ADDR:
		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		r = -ENOMEM;
		kvm->arch.vpic = kvm_create_pic(kvm);
		if (kvm->arch.vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->arch.vpic);
				kvm->arch.vpic = NULL;
				goto out;
			}
		} else
			goto out;
		break;
	case KVM_CREATE_PIT:
		r = -ENOMEM;
		kvm->arch.vpit = kvm_create_pit(kvm);
		if (kvm->arch.vpit)
			r = 0;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
					irq_event.irq,
					irq_event.level);
			kvm_ioapic_set_irq(kvm->arch.vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_get_pit(kvm, &ps);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &ps, sizeof ps))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_PIT: {
		struct kvm_pit_state ps;
		r = -EFAULT;
		if (copy_from_user(&ps, argp, sizeof ps))
			goto out;
		r = -ENXIO;
		if (!kvm->arch.vpit)
			goto out;
		r = kvm_vm_ioctl_set_pit(kvm, &ps);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;
	}
out:
	return r;
}

static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}
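/*
 * The loop above compacts msrs_to_save in place, keeping only the MSRs
 * the host actually implements: if rdmsr_safe() reports a fault for an
 * index (e.g. MSR_IA32_PERF_STATUS on a CPU without it), that slot is
 * dropped from the list exposed to userspace.
 */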

/*
 * Only the apic needs an MMIO device hook, so shortcut now.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
	return dev;
}

int emulator_read_std(unsigned long addr,
		      void *val,
		      unsigned int bytes,
		      struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= tocopy;
		data += tocopy;
		addr += tocopy;
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);

static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_read_std(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

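/*
 * Writes that cross a page boundary are split below:
 * "now = -addr & ~PAGE_MASK" is the distance to the next page boundary,
 * e.g. for addr = 0x1ffe the first chunk is 2 bytes and the remainder
 * starts at 0x2000.
 */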
1915int emulator_write_emulated(unsigned long addr,
1916 const void *val,
1917 unsigned int bytes,
1918 struct kvm_vcpu *vcpu)
1919{
1920 /* Crossing a page boundary? */
1921 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
1922 int rc, now;
1923
1924 now = -addr & ~PAGE_MASK;
1925 rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
1926 if (rc != X86EMUL_CONTINUE)
1927 return rc;
1928 addr += now;
1929 val += now;
1930 bytes -= now;
1931 }
1932 return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
1933}
1934EXPORT_SYMBOL_GPL(emulator_write_emulated);
1935
1936static int emulator_cmpxchg_emulated(unsigned long addr,
1937 const void *old,
1938 const void *new,
1939 unsigned int bytes,
1940 struct kvm_vcpu *vcpu)
1941{
1942 static int reported;
1943
1944 if (!reported) {
1945 reported = 1;
1946 printk(KERN_WARNING "kvm: emulating exchange as write\n");
1947 }
2bacc55c
MT
1948#ifndef CONFIG_X86_64
1949 /* guests cmpxchg8b have to be emulated atomically */
1950 if (bytes == 8) {
10589a46 1951 gpa_t gpa;
2bacc55c 1952 struct page *page;
c0b49b0d 1953 char *kaddr;
2bacc55c
MT
1954 u64 val;
1955
10589a46
MT
1956 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1957
2bacc55c
MT
1958 if (gpa == UNMAPPED_GVA ||
1959 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
1960 goto emul_write;
1961
1962 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
1963 goto emul_write;
1964
1965 val = *(u64 *)new;
72dc67a6
IE
1966
1967 down_read(&current->mm->mmap_sem);
2bacc55c 1968 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
72dc67a6
IE
1969 up_read(&current->mm->mmap_sem);
1970
c0b49b0d
AM
1971 kaddr = kmap_atomic(page, KM_USER0);
1972 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
1973 kunmap_atomic(kaddr, KM_USER0);
2bacc55c
MT
1974 kvm_release_page_dirty(page);
1975 }
3200f405 1976emul_write:
2bacc55c
MT
1977#endif
1978
bbd9b64e
CO
1979 return emulator_write_emulated(addr, new, bytes, vcpu);
1980}
1981
1982static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
1983{
1984 return kvm_x86_ops->get_segment_base(vcpu, seg);
1985}
1986
1987int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
1988{
1989 return X86EMUL_CONTINUE;
1990}
1991
1992int emulate_clts(struct kvm_vcpu *vcpu)
1993{
54e445ca 1994 KVMTRACE_0D(CLTS, vcpu, handler);
ad312c7c 1995 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
1996 return X86EMUL_CONTINUE;
1997}
1998
1999int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
2000{
2001 struct kvm_vcpu *vcpu = ctxt->vcpu;
2002
2003 switch (dr) {
2004 case 0 ... 3:
2005 *dest = kvm_x86_ops->get_dr(vcpu, dr);
2006 return X86EMUL_CONTINUE;
2007 default:
b8688d51 2008 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
2009 return X86EMUL_UNHANDLEABLE;
2010 }
2011}
2012
2013int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
2014{
2015 unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
2016 int exception;
2017
2018 kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
2019 if (exception) {
2020 /* FIXME: better handling */
2021 return X86EMUL_UNHANDLEABLE;
2022 }
2023 return X86EMUL_CONTINUE;
2024}
2025
2026void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
2027{
2028 static int reported;
2029 u8 opcodes[4];
ad312c7c 2030 unsigned long rip = vcpu->arch.rip;
2031 unsigned long rip_linear;
2032
2033 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
2034
2035 if (reported)
2036 return;
2037
2038 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
2039
2040 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
2041 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
2042 reported = 1;
2043}
2044EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
2045
14af3f3c 2046static struct x86_emulate_ops emulate_ops = {
bbd9b64e 2047 .read_std = emulator_read_std,
2048 .read_emulated = emulator_read_emulated,
2049 .write_emulated = emulator_write_emulated,
2050 .cmpxchg_emulated = emulator_cmpxchg_emulated,
2051};
2052
2053int emulate_instruction(struct kvm_vcpu *vcpu,
2054 struct kvm_run *run,
2055 unsigned long cr2,
2056 u16 error_code,
571008da 2057 int emulation_type)
2058{
2059 int r;
571008da 2060 struct decode_cache *c;
bbd9b64e 2061
ad312c7c 2062 vcpu->arch.mmio_fault_cr2 = cr2;
2063 kvm_x86_ops->cache_regs(vcpu);
2064
2065 vcpu->mmio_is_write = 0;
ad312c7c 2066 vcpu->arch.pio.string = 0;
bbd9b64e 2067
571008da 2068 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
2069 int cs_db, cs_l;
2070 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
2071
2072 vcpu->arch.emulate_ctxt.vcpu = vcpu;
2073 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
2074 vcpu->arch.emulate_ctxt.mode =
2075 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
2076 ? X86EMUL_MODE_REAL : cs_l
2077 ? X86EMUL_MODE_PROT64 : cs_db
2078 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
2079
2080 if (vcpu->arch.emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
2081 vcpu->arch.emulate_ctxt.cs_base = 0;
2082 vcpu->arch.emulate_ctxt.ds_base = 0;
2083 vcpu->arch.emulate_ctxt.es_base = 0;
2084 vcpu->arch.emulate_ctxt.ss_base = 0;
bbd9b64e 2085 } else {
ad312c7c 2086 vcpu->arch.emulate_ctxt.cs_base =
bbd9b64e 2087 get_segment_base(vcpu, VCPU_SREG_CS);
ad312c7c 2088 vcpu->arch.emulate_ctxt.ds_base =
bbd9b64e 2089 get_segment_base(vcpu, VCPU_SREG_DS);
ad312c7c 2090 vcpu->arch.emulate_ctxt.es_base =
bbd9b64e 2091 get_segment_base(vcpu, VCPU_SREG_ES);
ad312c7c 2092 vcpu->arch.emulate_ctxt.ss_base =
2093 get_segment_base(vcpu, VCPU_SREG_SS);
2094 }
2095
ad312c7c 2096 vcpu->arch.emulate_ctxt.gs_base =
bbd9b64e 2097 get_segment_base(vcpu, VCPU_SREG_GS);
ad312c7c 2098 vcpu->arch.emulate_ctxt.fs_base =
2099 get_segment_base(vcpu, VCPU_SREG_FS);
2100
ad312c7c 2101 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
2102
2103 /* Reject instructions other than VMCALL/VMMCALL when
2104 * trying to emulate an invalid opcode */
2105 c = &vcpu->arch.emulate_ctxt.decode;
2106 if ((emulation_type & EMULTYPE_TRAP_UD) &&
2107 (!(c->twobyte && c->b == 0x01 &&
2108 (c->modrm_reg == 0 || c->modrm_reg == 3) &&
2109 c->modrm_mod == 3 && c->modrm_rm == 1)))
2110 return EMULATE_FAIL;
2111
f2b5756b 2112 ++vcpu->stat.insn_emulation;
bbd9b64e 2113 if (r) {
f2b5756b 2114 ++vcpu->stat.insn_emulation_fail;
2115 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2116 return EMULATE_DONE;
2117 return EMULATE_FAIL;
2118 }
2119 }
2120
ad312c7c 2121 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
bbd9b64e 2122
ad312c7c 2123 if (vcpu->arch.pio.string)
2124 return EMULATE_DO_MMIO;
2125
2126 if ((r || vcpu->mmio_is_write) && run) {
2127 run->exit_reason = KVM_EXIT_MMIO;
2128 run->mmio.phys_addr = vcpu->mmio_phys_addr;
2129 memcpy(run->mmio.data, vcpu->mmio_data, 8);
2130 run->mmio.len = vcpu->mmio_size;
2131 run->mmio.is_write = vcpu->mmio_is_write;
2132 }
2133
2134 if (r) {
2135 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
2136 return EMULATE_DONE;
2137 if (!vcpu->mmio_needed) {
2138 kvm_report_emulation_failure(vcpu, "mmio");
2139 return EMULATE_FAIL;
2140 }
2141 return EMULATE_DO_MMIO;
2142 }
2143
2144 kvm_x86_ops->decache_regs(vcpu);
ad312c7c 2145 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
2146
2147 if (vcpu->mmio_is_write) {
2148 vcpu->mmio_needed = 0;
2149 return EMULATE_DO_MMIO;
2150 }
2151
2152 return EMULATE_DONE;
2153}
2154EXPORT_SYMBOL_GPL(emulate_instruction);
2155
2156static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
2157{
2158 int i;
2159
2160 for (i = 0; i < ARRAY_SIZE(vcpu->arch.pio.guest_pages); ++i)
2161 if (vcpu->arch.pio.guest_pages[i]) {
2162 kvm_release_page_dirty(vcpu->arch.pio.guest_pages[i]);
2163 vcpu->arch.pio.guest_pages[i] = NULL;
2164 }
2165}
2166
2167static int pio_copy_data(struct kvm_vcpu *vcpu)
2168{
ad312c7c 2169 void *p = vcpu->arch.pio_data;
2170 void *q;
2171 unsigned bytes;
ad312c7c 2172 int nr_pages = vcpu->arch.pio.guest_pages[1] ? 2 : 1;
de7d789a 2173
ad312c7c 2174 q = vmap(vcpu->arch.pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
2175 PAGE_KERNEL);
2176 if (!q) {
2177 free_pio_guest_pages(vcpu);
2178 return -ENOMEM;
2179 }
2180 q += vcpu->arch.pio.guest_page_offset;
2181 bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
2182 if (vcpu->arch.pio.in)
2183 memcpy(q, p, bytes);
2184 else
2185 memcpy(p, q, bytes);
ad312c7c 2186 q -= vcpu->arch.pio.guest_page_offset;
2187 vunmap(q);
2188 free_pio_guest_pages(vcpu);
2189 return 0;
2190}
2191
2192int complete_pio(struct kvm_vcpu *vcpu)
2193{
ad312c7c 2194 struct kvm_pio_request *io = &vcpu->arch.pio;
2195 long delta;
2196 int r;
2197
2198 kvm_x86_ops->cache_regs(vcpu);
2199
2200 if (!io->string) {
2201 if (io->in)
ad312c7c 2202 memcpy(&vcpu->arch.regs[VCPU_REGS_RAX], vcpu->arch.pio_data,
2203 io->size);
2204 } else {
2205 if (io->in) {
2206 r = pio_copy_data(vcpu);
2207 if (r) {
2208 kvm_x86_ops->cache_regs(vcpu);
2209 return r;
2210 }
2211 }
2212
2213 delta = 1;
2214 if (io->rep) {
2215 delta *= io->cur_count;
2216 /*
2217 * The size of the register should really depend on
2218 * current address size.
2219 */
ad312c7c 2220 vcpu->arch.regs[VCPU_REGS_RCX] -= delta;
2221 }
2222 if (io->down)
2223 delta = -delta;
2224 delta *= io->size;
2225 if (io->in)
ad312c7c 2226 vcpu->arch.regs[VCPU_REGS_RDI] += delta;
de7d789a 2227 else
ad312c7c 2228 vcpu->arch.regs[VCPU_REGS_RSI] += delta;
2229 }
2230
2231 kvm_x86_ops->decache_regs(vcpu);
2232
2233 io->count -= io->cur_count;
2234 io->cur_count = 0;
2235
2236 return 0;
2237}
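
/*
 * A small sketch of the register bookkeeping in complete_pio() above for
 * the common case: a forward (down == 0) REP INS of "cur_count" elements
 * of "size" bytes. The struct is illustrative only.
 */
struct ex_pio_state { long rcx, rdi; };

static void ex_complete_rep_in(struct ex_pio_state *s, int size, int cur_count)
{
	long delta = 1;

	delta *= cur_count;	/* rep: elements moved this round */
	s->rcx -= delta;	/* the REP counter is consumed ... */
	delta *= size;		/* ... but the pointer moves in bytes */
	s->rdi += delta;	/* IN string data arrives at ES:(E)DI */
}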
2238
2239static void kernel_pio(struct kvm_io_device *pio_dev,
2240 struct kvm_vcpu *vcpu,
2241 void *pd)
2242{
2243 /* TODO: string I/O for in-kernel devices */
2244
2245 mutex_lock(&vcpu->kvm->lock);
2246 if (vcpu->arch.pio.in)
2247 kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
2248 vcpu->arch.pio.size,
2249 pd);
2250 else
2251 kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
2252 vcpu->arch.pio.size,
2253 pd);
2254 mutex_unlock(&vcpu->kvm->lock);
2255}
2256
2257static void pio_string_write(struct kvm_io_device *pio_dev,
2258 struct kvm_vcpu *vcpu)
2259{
2260 struct kvm_pio_request *io = &vcpu->arch.pio;
2261 void *pd = vcpu->arch.pio_data;
2262 int i;
2263
2264 mutex_lock(&vcpu->kvm->lock);
2265 for (i = 0; i < io->cur_count; i++) {
2266 kvm_iodevice_write(pio_dev, io->port,
2267 io->size,
2268 pd);
2269 pd += io->size;
2270 }
2271 mutex_unlock(&vcpu->kvm->lock);
2272}
2273
2274static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
2275 gpa_t addr)
2276{
2277 return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
2278}
2279
2280int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2281 int size, unsigned port)
2282{
2283 struct kvm_io_device *pio_dev;
2284
2285 vcpu->run->exit_reason = KVM_EXIT_IO;
2286 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2287 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2288 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2289 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
2290 vcpu->run->io.port = vcpu->arch.pio.port = port;
2291 vcpu->arch.pio.in = in;
2292 vcpu->arch.pio.string = 0;
2293 vcpu->arch.pio.down = 0;
2294 vcpu->arch.pio.guest_page_offset = 0;
2295 vcpu->arch.pio.rep = 0;
de7d789a 2296
2297 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2298 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2299 handler);
2300 else
2301 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2302 handler);
2303
de7d789a 2304 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2305 memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
2306
2307 kvm_x86_ops->skip_emulated_instruction(vcpu);
2308
2309 pio_dev = vcpu_find_pio_dev(vcpu, port);
2310 if (pio_dev) {
ad312c7c 2311 kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
2312 complete_pio(vcpu);
2313 return 1;
2314 }
2315 return 0;
2316}
2317EXPORT_SYMBOL_GPL(kvm_emulate_pio);
2318
2319int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2320 int size, unsigned long count, int down,
2321 gva_t address, int rep, unsigned port)
2322{
2323 unsigned now, in_page;
2324 int i, ret = 0;
2325 int nr_pages = 1;
2326 struct page *page;
2327 struct kvm_io_device *pio_dev;
2328
2329 vcpu->run->exit_reason = KVM_EXIT_IO;
2330 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
ad312c7c 2331 vcpu->run->io.size = vcpu->arch.pio.size = size;
de7d789a 2332 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
2333 vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
2334 vcpu->run->io.port = vcpu->arch.pio.port = port;
2335 vcpu->arch.pio.in = in;
2336 vcpu->arch.pio.string = 1;
2337 vcpu->arch.pio.down = down;
2338 vcpu->arch.pio.guest_page_offset = offset_in_page(address);
2339 vcpu->arch.pio.rep = rep;
de7d789a 2340
2341 if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
2342 KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
2343 handler);
2344 else
2345 KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
2346 handler);
2347
2348 if (!count) {
2349 kvm_x86_ops->skip_emulated_instruction(vcpu);
2350 return 1;
2351 }
2352
2353 if (!down)
2354 in_page = PAGE_SIZE - offset_in_page(address);
2355 else
2356 in_page = offset_in_page(address) + size;
2357 now = min(count, (unsigned long)in_page / size);
2358 if (!now) {
2359 /*
2360 * String I/O straddles page boundary. Pin two guest pages
2361 * so that we satisfy atomicity constraints. Do just one
2362 * transaction to avoid complexity.
2363 */
2364 nr_pages = 2;
2365 now = 1;
2366 }
2367 if (down) {
2368 /*
2369 * String I/O in reverse. Yuck. Kill the guest, fix later.
2370 */
2371 pr_unimpl(vcpu, "guest string pio down\n");
c1a5d4f9 2372 kvm_inject_gp(vcpu, 0);
2373 return 1;
2374 }
2375 vcpu->run->io.count = now;
ad312c7c 2376 vcpu->arch.pio.cur_count = now;
de7d789a 2377
ad312c7c 2378 if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
2379 kvm_x86_ops->skip_emulated_instruction(vcpu);
2380
2381 for (i = 0; i < nr_pages; ++i) {
de7d789a 2382 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
ad312c7c 2383 vcpu->arch.pio.guest_pages[i] = page;
de7d789a 2384 if (!page) {
c1a5d4f9 2385 kvm_inject_gp(vcpu, 0);
2386 free_pio_guest_pages(vcpu);
2387 return 1;
2388 }
2389 }
2390
2391 pio_dev = vcpu_find_pio_dev(vcpu, port);
ad312c7c 2392 if (!vcpu->arch.pio.in) {
2393 /* string PIO write */
2394 ret = pio_copy_data(vcpu);
2395 if (ret >= 0 && pio_dev) {
2396 pio_string_write(pio_dev, vcpu);
2397 complete_pio(vcpu);
ad312c7c 2398 if (vcpu->arch.pio.count == 0)
2399 ret = 1;
2400 }
2401 } else if (pio_dev)
2402 pr_unimpl(vcpu, "no string pio read support yet, "
2403 "port %x size %d count %ld\n",
2404 port, size, count);
2405
2406 return ret;
2407}
2408EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
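
/*
 * Sketch of the "how many elements before the page edge" computation in
 * kvm_emulate_pio_string() above, assuming 4 KiB pages and a forward copy;
 * the helper is hypothetical.
 */
#define EX_PAGE_SIZE 4096UL
#define ex_offset_in_page(p) ((unsigned long)(p) & (EX_PAGE_SIZE - 1))

static unsigned long ex_elems_this_round(unsigned long address, int size,
					 unsigned long count)
{
	unsigned long in_page = EX_PAGE_SIZE - ex_offset_in_page(address);
	unsigned long now = count < in_page / size ? count : in_page / size;

	/* now == 0 means one element straddles the boundary: the code above
	 * then pins two pages and transfers just that one element */
	return now ? now : 1;
}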
2409
f8c16bba 2410int kvm_arch_init(void *opaque)
043405e1 2411{
56c6d28a 2412 int r;
2413 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
2414
2415 if (kvm_x86_ops) {
2416 printk(KERN_ERR "kvm: already loaded the other module\n");
2417 r = -EEXIST;
2418 goto out;
2419 }
2420
2421 if (!ops->cpu_has_kvm_support()) {
2422 printk(KERN_ERR "kvm: no hardware support\n");
2423 r = -EOPNOTSUPP;
2424 goto out;
2425 }
2426 if (ops->disabled_by_bios()) {
2427 printk(KERN_ERR "kvm: disabled by bios\n");
2428 r = -EOPNOTSUPP;
2429 goto out;
2430 }
2431
2432 r = kvm_mmu_module_init();
2433 if (r)
2434 goto out;
2435
2436 kvm_init_msr_list();
2437
f8c16bba 2438 kvm_x86_ops = ops;
56c6d28a 2439 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
2440 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
2441 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2442 PT_DIRTY_MASK, PT64_NX_MASK, 0);
f8c16bba 2443 return 0;
2444
2445out:
56c6d28a 2446 return r;
043405e1 2447}
8776e519 2448
2449void kvm_arch_exit(void)
2450{
2451 kvm_x86_ops = NULL;
2452 kvm_mmu_module_exit();
2453}
f8c16bba 2454
2455int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2456{
2457 ++vcpu->stat.halt_exits;
2714d1d3 2458 KVMTRACE_0D(HLT, vcpu, handler);
8776e519 2459 if (irqchip_in_kernel(vcpu->kvm)) {
a4535290 2460 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
3200f405 2461 up_read(&vcpu->kvm->slots_lock);
8776e519 2462 kvm_vcpu_block(vcpu);
3200f405 2463 down_read(&vcpu->kvm->slots_lock);
a4535290 2464 if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
2465 return -EINTR;
2466 return 1;
2467 } else {
2468 vcpu->run->exit_reason = KVM_EXIT_HLT;
2469 return 0;
2470 }
2471}
2472EXPORT_SYMBOL_GPL(kvm_emulate_halt);
2473
2474static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
2475 unsigned long a1)
2476{
2477 if (is_long_mode(vcpu))
2478 return a0;
2479 else
2480 return a0 | ((gpa_t)a1 << 32);
2481}
2482
2483int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
2484{
2485 unsigned long nr, a0, a1, a2, a3, ret;
2f333bcb 2486 int r = 1;
2487
2488 kvm_x86_ops->cache_regs(vcpu);
2489
2490 nr = vcpu->arch.regs[VCPU_REGS_RAX];
2491 a0 = vcpu->arch.regs[VCPU_REGS_RBX];
2492 a1 = vcpu->arch.regs[VCPU_REGS_RCX];
2493 a2 = vcpu->arch.regs[VCPU_REGS_RDX];
2494 a3 = vcpu->arch.regs[VCPU_REGS_RSI];
8776e519 2495
2496 KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
2497
2498 if (!is_long_mode(vcpu)) {
2499 nr &= 0xFFFFFFFF;
2500 a0 &= 0xFFFFFFFF;
2501 a1 &= 0xFFFFFFFF;
2502 a2 &= 0xFFFFFFFF;
2503 a3 &= 0xFFFFFFFF;
2504 }
2505
2506 switch (nr) {
2507 case KVM_HC_VAPIC_POLL_IRQ:
2508 ret = 0;
2509 break;
2510 case KVM_HC_MMU_OP:
2511 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
2512 break;
2513 default:
2514 ret = -KVM_ENOSYS;
2515 break;
2516 }
ad312c7c 2517 vcpu->arch.regs[VCPU_REGS_RAX] = ret;
8776e519 2518 kvm_x86_ops->decache_regs(vcpu);
f11c3a8d 2519 ++vcpu->stat.hypercalls;
2f333bcb 2520 return r;
2521}
2522EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
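
/*
 * A hedged guest-side sketch matching the ABI decoded above: hypercall
 * number in RAX, arguments in RBX/RCX/RDX/RSI, result back in RAX. The
 * bytes encode Intel's VMCALL; on AMD the hypervisor patches in VMMCALL
 * (see kvm_fix_hypercall() below).
 */
static inline long ex_kvm_hypercall0(unsigned long nr)
{
	long ret;

	asm volatile(".byte 0x0f, 0x01, 0xc1"	/* vmcall */
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

/* e.g. ex_kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ) lands in the switch above
 * and returns 0 */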
2523
2524int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
2525{
2526 char instruction[3];
2527 int ret = 0;
2528
2529
2530 /*
2531 * Blow out the MMU so that no other VCPU has an active mapping,
2532 * which ensures that the updated hypercall appears atomically
2533 * across all VCPUs.
2534 */
2535 kvm_mmu_zap_all(vcpu->kvm);
2536
2537 kvm_x86_ops->cache_regs(vcpu);
2538 kvm_x86_ops->patch_hypercall(vcpu, instruction);
ad312c7c 2539 if (emulator_write_emulated(vcpu->arch.rip, instruction, 3, vcpu)
2540 != X86EMUL_CONTINUE)
2541 ret = -EFAULT;
2542
2543 return ret;
2544}
2545
2546static u64 mk_cr_64(u64 curr_cr, u32 new_val)
2547{
2548 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
2549}
2550
2551void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2552{
2553 struct descriptor_table dt = { limit, base };
2554
2555 kvm_x86_ops->set_gdt(vcpu, &dt);
2556}
2557
2558void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
2559{
2560 struct descriptor_table dt = { limit, base };
2561
2562 kvm_x86_ops->set_idt(vcpu, &dt);
2563}
2564
2565void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
2566 unsigned long *rflags)
2567{
2d3ad1f4 2568 kvm_lmsw(vcpu, msw);
2569 *rflags = kvm_x86_ops->get_rflags(vcpu);
2570}
2571
2572unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
2573{
2574 unsigned long value;
2575
2576 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
2577 switch (cr) {
2578 case 0:
2579 value = vcpu->arch.cr0;
2580 break;
8776e519 2581 case 2:
2582 value = vcpu->arch.cr2;
2583 break;
8776e519 2584 case 3:
2585 value = vcpu->arch.cr3;
2586 break;
8776e519 2587 case 4:
2588 value = vcpu->arch.cr4;
2589 break;
152ff9be 2590 case 8:
2591 value = kvm_get_cr8(vcpu);
2592 break;
8776e519 2593 default:
b8688d51 2594 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2595 return 0;
2596 }
2597 KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
2598 (u32)((u64)value >> 32), handler);
2599
2600 return value;
2601}
2602
2603void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
2604 unsigned long *rflags)
2605{
2606 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
2607 (u32)((u64)val >> 32), handler);
2608
2609 switch (cr) {
2610 case 0:
2d3ad1f4 2611 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
2612 *rflags = kvm_x86_ops->get_rflags(vcpu);
2613 break;
2614 case 2:
ad312c7c 2615 vcpu->arch.cr2 = val;
2616 break;
2617 case 3:
2d3ad1f4 2618 kvm_set_cr3(vcpu, val);
2619 break;
2620 case 4:
2d3ad1f4 2621 kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
8776e519 2622 break;
152ff9be 2623 case 8:
2d3ad1f4 2624 kvm_set_cr8(vcpu, val & 0xfUL);
152ff9be 2625 break;
8776e519 2626 default:
b8688d51 2627 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
2628 }
2629}
2630
2631static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
2632{
2633 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
2634 int j, nent = vcpu->arch.cpuid_nent;
2635
2636 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
2637 /* when no next entry is found, the current entry[i] is reselected */
2638 for (j = (i + 1) % nent; ; j = (j + 1) % nent) {
ad312c7c 2639 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
2640 if (ej->function == e->function) {
2641 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2642 return j;
2643 }
2644 }
2645 return 0; /* silence gcc, even though control never reaches here */
2646}
2647
2648/* find an entry with matching function, matching index (if needed), and that
2649 * should be read next (if it's stateful) */
2650static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
2651 u32 function, u32 index)
2652{
2653 if (e->function != function)
2654 return 0;
2655 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
2656 return 0;
2657 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
2658 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
2659 return 0;
2660 return 1;
2661}
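
/*
 * Toy walk through the READ_NEXT rotation implemented above: three entries
 * for one stateful function; each emulated CPUID consumes the flagged entry
 * and flags its successor, wrapping back to the start. The data and the
 * constant value are illustrative only.
 */
#include <stdio.h>

#define EX_READ_NEXT 4			/* stands in for the real flag bit */

struct ex_ent { unsigned function, flags; };

static int ex_next_stateful(struct ex_ent *e, int nent, int i)
{
	int j;

	e[i].flags &= ~EX_READ_NEXT;
	for (j = (i + 1) % nent; ; j = (j + 1) % nent)
		if (e[j].function == e[i].function) {
			e[j].flags |= EX_READ_NEXT;
			return j;	/* worst case j == i: reselected */
		}
}

int main(void)
{
	struct ex_ent e[3] = { {2, EX_READ_NEXT}, {2, 0}, {2, 0} };
	int i = 0, k;

	for (k = 0; k < 4; k++) {
		printf("%d ", i);	/* prints: 0 1 2 0 */
		i = ex_next_stateful(e, 3, i);
	}
	printf("\n");
	return 0;
}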
2662
2663void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
2664{
2665 int i;
2666 u32 function, index;
2667 struct kvm_cpuid_entry2 *e, *best;
2668
2669 kvm_x86_ops->cache_regs(vcpu);
2670 function = vcpu->arch.regs[VCPU_REGS_RAX];
2671 index = vcpu->arch.regs[VCPU_REGS_RCX];
2672 vcpu->arch.regs[VCPU_REGS_RAX] = 0;
2673 vcpu->arch.regs[VCPU_REGS_RBX] = 0;
2674 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
2675 vcpu->arch.regs[VCPU_REGS_RDX] = 0;
8776e519 2676 best = NULL;
2677 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
2678 e = &vcpu->arch.cpuid_entries[i];
2679 if (is_matching_cpuid_entry(e, function, index)) {
2680 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
2681 move_to_next_stateful_cpuid_entry(vcpu, i);
2682 best = e;
2683 break;
2684 }
2685 /*
2686 * Both basic or both extended?
2687 */
2688 if (((e->function ^ function) & 0x80000000) == 0)
2689 if (!best || e->function > best->function)
2690 best = e;
2691 }
2692 if (best) {
2693 vcpu->arch.regs[VCPU_REGS_RAX] = best->eax;
2694 vcpu->arch.regs[VCPU_REGS_RBX] = best->ebx;
2695 vcpu->arch.regs[VCPU_REGS_RCX] = best->ecx;
2696 vcpu->arch.regs[VCPU_REGS_RDX] = best->edx;
2697 }
2698 kvm_x86_ops->decache_regs(vcpu);
2699 kvm_x86_ops->skip_emulated_instruction(vcpu);
2700 KVMTRACE_5D(CPUID, vcpu, function,
2701 (u32)vcpu->arch.regs[VCPU_REGS_RAX],
2702 (u32)vcpu->arch.regs[VCPU_REGS_RBX],
2703 (u32)vcpu->arch.regs[VCPU_REGS_RCX],
2704 (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
2705}
2706EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
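
/* The "both basic or both extended" test above keys off bit 31, which
 * separates the basic (0x0...) and extended (0x8000...) CPUID ranges; a
 * one-line restatement: */
static int ex_same_cpuid_range(unsigned int a, unsigned int b)
{
	return ((a ^ b) & 0x80000000) == 0;	/* 0x4,0x7 -> 1; 0x1,0x80000001 -> 0 */
}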
d0752060 2707
2708/*
2709 * Check if userspace requested an interrupt window, and that the
2710 * interrupt window is open.
2711 *
2712 * No need to exit to userspace if we already have an interrupt queued.
2713 */
2714static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2715 struct kvm_run *kvm_run)
2716{
ad312c7c 2717 return (!vcpu->arch.irq_summary &&
b6c7a5dc 2718 kvm_run->request_interrupt_window &&
ad312c7c 2719 vcpu->arch.interrupt_window_open &&
2720 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2721}
2722
2723static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2724 struct kvm_run *kvm_run)
2725{
2726 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2d3ad1f4 2727 kvm_run->cr8 = kvm_get_cr8(vcpu);
2728 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2729 if (irqchip_in_kernel(vcpu->kvm))
2730 kvm_run->ready_for_interrupt_injection = 1;
2731 else
2732 kvm_run->ready_for_interrupt_injection =
2733 (vcpu->arch.interrupt_window_open &&
2734 vcpu->arch.irq_summary == 0);
2735}
2736
2737static void vapic_enter(struct kvm_vcpu *vcpu)
2738{
2739 struct kvm_lapic *apic = vcpu->arch.apic;
2740 struct page *page;
2741
2742 if (!apic || !apic->vapic_addr)
2743 return;
2744
10589a46 2745 down_read(&current->mm->mmap_sem);
b93463aa 2746 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
10589a46 2747 up_read(&current->mm->mmap_sem);
2748
2749 vcpu->arch.apic->vapic_page = page;
2750}
2751
2752static void vapic_exit(struct kvm_vcpu *vcpu)
2753{
2754 struct kvm_lapic *apic = vcpu->arch.apic;
2755
2756 if (!apic || !apic->vapic_addr)
2757 return;
2758
2759 kvm_release_page_dirty(apic->vapic_page);
2760 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
2761}
2762
2763static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2764{
2765 int r;
2766
a4535290 2767 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
b6c7a5dc 2768 pr_debug("vcpu %d received sipi with vector # %x\n",
ad312c7c 2769 vcpu->vcpu_id, vcpu->arch.sipi_vector);
2770 kvm_lapic_reset(vcpu);
2771 r = kvm_x86_ops->vcpu_reset(vcpu);
2772 if (r)
2773 return r;
a4535290 2774 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
2775 }
2776
3200f405 2777 down_read(&vcpu->kvm->slots_lock);
2778 vapic_enter(vcpu);
2779
2780preempted:
2781 if (vcpu->guest_debug.enabled)
2782 kvm_x86_ops->guest_debug_pre(vcpu);
2783
2784again:
2785 if (vcpu->requests)
2786 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
2787 kvm_mmu_unload(vcpu);
2788
2789 r = kvm_mmu_reload(vcpu);
2790 if (unlikely(r))
2791 goto out;
2792
2793 if (vcpu->requests) {
2794 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
2f599714 2795 __kvm_migrate_timers(vcpu);
2796 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
2797 kvm_x86_ops->tlb_flush(vcpu);
2798 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
2799 &vcpu->requests)) {
2800 kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
2801 r = 0;
2802 goto out;
2803 }
2804 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
2805 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2806 r = 0;
2807 goto out;
2808 }
2f52d58c 2809 }
b93463aa 2810
06e05645 2811 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
2812 kvm_inject_pending_timer_irqs(vcpu);
2813
2814 preempt_disable();
2815
2816 kvm_x86_ops->prepare_guest_switch(vcpu);
2817 kvm_load_guest_fpu(vcpu);
2818
2819 local_irq_disable();
2820
d4acf7e7 2821 if (vcpu->requests || need_resched()) {
2822 local_irq_enable();
2823 preempt_enable();
2824 r = 1;
2825 goto out;
2826 }
2827
2828 if (signal_pending(current)) {
2829 local_irq_enable();
2830 preempt_enable();
2831 r = -EINTR;
2832 kvm_run->exit_reason = KVM_EXIT_INTR;
2833 ++vcpu->stat.signal_exits;
2834 goto out;
2835 }
2836
2837 vcpu->guest_mode = 1;
2838 /*
2839 * Make sure that guest_mode assignment won't happen after
2840 * testing the pending IRQ vector bitmap.
2841 */
2842 smp_wmb();
2843
ad312c7c 2844 if (vcpu->arch.exception.pending)
2845 __queue_exception(vcpu);
2846 else if (irqchip_in_kernel(vcpu->kvm))
b6c7a5dc 2847 kvm_x86_ops->inject_pending_irq(vcpu);
eb9774f0 2848 else
2849 kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);
2850
2851 kvm_lapic_sync_to_vapic(vcpu);
2852
2853 up_read(&vcpu->kvm->slots_lock);
2854
2855 kvm_guest_enter();
2856
b6c7a5dc 2857
2714d1d3 2858 KVMTRACE_0D(VMENTRY, vcpu, entryexit);
2859 kvm_x86_ops->run(vcpu, kvm_run);
2860
2861 vcpu->guest_mode = 0;
2862 local_irq_enable();
2863
2864 ++vcpu->stat.exits;
2865
2866 /*
2867 * We must have an instruction between local_irq_enable() and
2868 * kvm_guest_exit(), so the timer interrupt isn't delayed by
2869 * the interrupt shadow. The stat.exits increment will do nicely.
2870 * But we need to prevent reordering, hence this barrier():
2871 */
2872 barrier();
2873
2874 kvm_guest_exit();
2875
2876 preempt_enable();
2877
2878 down_read(&vcpu->kvm->slots_lock);
2879
2880 /*
2881 * Profile KVM exit RIPs:
2882 */
2883 if (unlikely(prof_on == KVM_PROFILING)) {
2884 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2885 profile_hit(KVM_PROFILING, (void *)vcpu->arch.rip);
2886 }
2887
2888 if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
2889 vcpu->arch.exception.pending = false;
298101da 2890
2891 kvm_lapic_sync_from_vapic(vcpu);
2892
2893 r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
2894
2895 if (r > 0) {
2896 if (dm_request_for_irq_injection(vcpu, kvm_run)) {
2897 r = -EINTR;
2898 kvm_run->exit_reason = KVM_EXIT_INTR;
2899 ++vcpu->stat.request_irq_exits;
2900 goto out;
2901 }
e1beb1d3 2902 if (!need_resched())
b6c7a5dc 2903 goto again;
2904 }
2905
2906out:
3200f405 2907 up_read(&vcpu->kvm->slots_lock);
2908 if (r > 0) {
2909 kvm_resched(vcpu);
3200f405 2910 down_read(&vcpu->kvm->slots_lock);
2911 goto preempted;
2912 }
2913
2914 post_kvm_run_save(vcpu, kvm_run);
2915
3200f405 2916 down_read(&vcpu->kvm->slots_lock);
b93463aa 2917 vapic_exit(vcpu);
3200f405 2918 up_read(&vcpu->kvm->slots_lock);
b93463aa 2919
2920 return r;
2921}
2922
2923int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2924{
2925 int r;
2926 sigset_t sigsaved;
2927
2928 vcpu_load(vcpu);
2929
a4535290 2930 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
2931 kvm_vcpu_block(vcpu);
2932 vcpu_put(vcpu);
2933 return -EAGAIN;
2934 }
2935
2936 if (vcpu->sigset_active)
2937 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2938
2939 /* re-sync apic's tpr */
2940 if (!irqchip_in_kernel(vcpu->kvm))
2d3ad1f4 2941 kvm_set_cr8(vcpu, kvm_run->cr8);
b6c7a5dc 2942
ad312c7c 2943 if (vcpu->arch.pio.cur_count) {
b6c7a5dc
HB
2944 r = complete_pio(vcpu);
2945 if (r)
2946 goto out;
2947 }
2948#ifdef CONFIG_HAS_IOMEM
2949 if (vcpu->mmio_needed) {
2950 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2951 vcpu->mmio_read_completed = 1;
2952 vcpu->mmio_needed = 0;
2953
2954 down_read(&vcpu->kvm->slots_lock);
b6c7a5dc 2955 r = emulate_instruction(vcpu, kvm_run,
2956 vcpu->arch.mmio_fault_cr2, 0,
2957 EMULTYPE_NO_DECODE);
3200f405 2958 up_read(&vcpu->kvm->slots_lock);
2959 if (r == EMULATE_DO_MMIO) {
2960 /*
2961 * Read-modify-write. Back to userspace.
2962 */
2963 r = 0;
2964 goto out;
2965 }
2966 }
2967#endif
2968 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
2969 kvm_x86_ops->cache_regs(vcpu);
ad312c7c 2970 vcpu->arch.regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
2971 kvm_x86_ops->decache_regs(vcpu);
2972 }
2973
2974 r = __vcpu_run(vcpu, kvm_run);
2975
2976out:
2977 if (vcpu->sigset_active)
2978 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2979
2980 vcpu_put(vcpu);
2981 return r;
2982}
2983
2984int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2985{
2986 vcpu_load(vcpu);
2987
2988 kvm_x86_ops->cache_regs(vcpu);
2989
2990 regs->rax = vcpu->arch.regs[VCPU_REGS_RAX];
2991 regs->rbx = vcpu->arch.regs[VCPU_REGS_RBX];
2992 regs->rcx = vcpu->arch.regs[VCPU_REGS_RCX];
2993 regs->rdx = vcpu->arch.regs[VCPU_REGS_RDX];
2994 regs->rsi = vcpu->arch.regs[VCPU_REGS_RSI];
2995 regs->rdi = vcpu->arch.regs[VCPU_REGS_RDI];
2996 regs->rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2997 regs->rbp = vcpu->arch.regs[VCPU_REGS_RBP];
b6c7a5dc 2998#ifdef CONFIG_X86_64
2999 regs->r8 = vcpu->arch.regs[VCPU_REGS_R8];
3000 regs->r9 = vcpu->arch.regs[VCPU_REGS_R9];
3001 regs->r10 = vcpu->arch.regs[VCPU_REGS_R10];
3002 regs->r11 = vcpu->arch.regs[VCPU_REGS_R11];
3003 regs->r12 = vcpu->arch.regs[VCPU_REGS_R12];
3004 regs->r13 = vcpu->arch.regs[VCPU_REGS_R13];
3005 regs->r14 = vcpu->arch.regs[VCPU_REGS_R14];
3006 regs->r15 = vcpu->arch.regs[VCPU_REGS_R15];
3007#endif
3008
ad312c7c 3009 regs->rip = vcpu->arch.rip;
3010 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
3011
3012 /*
3013 * Don't leak debug flags in case they were set for guest debugging
3014 */
3015 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
3016 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
3017
3018 vcpu_put(vcpu);
3019
3020 return 0;
3021}
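
/*
 * A hedged user-space sketch of the ioctl that reaches the handler above,
 * assuming vcpu_fd came from KVM_CREATE_VCPU; error handling omitted.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static unsigned long long ex_read_guest_rip(int vcpu_fd)
{
	struct kvm_regs regs;

	ioctl(vcpu_fd, KVM_GET_REGS, &regs);	/* fills rax..r15, rip, rflags */
	return regs.rip;
}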
3022
3023int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3024{
3025 vcpu_load(vcpu);
3026
3027 vcpu->arch.regs[VCPU_REGS_RAX] = regs->rax;
3028 vcpu->arch.regs[VCPU_REGS_RBX] = regs->rbx;
3029 vcpu->arch.regs[VCPU_REGS_RCX] = regs->rcx;
3030 vcpu->arch.regs[VCPU_REGS_RDX] = regs->rdx;
3031 vcpu->arch.regs[VCPU_REGS_RSI] = regs->rsi;
3032 vcpu->arch.regs[VCPU_REGS_RDI] = regs->rdi;
3033 vcpu->arch.regs[VCPU_REGS_RSP] = regs->rsp;
3034 vcpu->arch.regs[VCPU_REGS_RBP] = regs->rbp;
b6c7a5dc 3035#ifdef CONFIG_X86_64
3036 vcpu->arch.regs[VCPU_REGS_R8] = regs->r8;
3037 vcpu->arch.regs[VCPU_REGS_R9] = regs->r9;
3038 vcpu->arch.regs[VCPU_REGS_R10] = regs->r10;
3039 vcpu->arch.regs[VCPU_REGS_R11] = regs->r11;
3040 vcpu->arch.regs[VCPU_REGS_R12] = regs->r12;
3041 vcpu->arch.regs[VCPU_REGS_R13] = regs->r13;
3042 vcpu->arch.regs[VCPU_REGS_R14] = regs->r14;
3043 vcpu->arch.regs[VCPU_REGS_R15] = regs->r15;
3044#endif
3045
ad312c7c 3046 vcpu->arch.rip = regs->rip;
3047 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
3048
3049 kvm_x86_ops->decache_regs(vcpu);
3050
3051 vcpu->arch.exception.pending = false;
3052
3053 vcpu_put(vcpu);
3054
3055 return 0;
3056}
3057
3058static void get_segment(struct kvm_vcpu *vcpu,
3059 struct kvm_segment *var, int seg)
3060{
14af3f3c 3061 kvm_x86_ops->get_segment(vcpu, var, seg);
3062}
3063
3064void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3065{
3066 struct kvm_segment cs;
3067
3068 get_segment(vcpu, &cs, VCPU_SREG_CS);
3069 *db = cs.db;
3070 *l = cs.l;
3071}
3072EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
3073
3074int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3075 struct kvm_sregs *sregs)
3076{
3077 struct descriptor_table dt;
3078 int pending_vec;
3079
3080 vcpu_load(vcpu);
3081
3082 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3083 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3084 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3085 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3086 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3087 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3088
3089 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3090 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3091
3092 kvm_x86_ops->get_idt(vcpu, &dt);
3093 sregs->idt.limit = dt.limit;
3094 sregs->idt.base = dt.base;
3095 kvm_x86_ops->get_gdt(vcpu, &dt);
3096 sregs->gdt.limit = dt.limit;
3097 sregs->gdt.base = dt.base;
3098
3099 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3100 sregs->cr0 = vcpu->arch.cr0;
3101 sregs->cr2 = vcpu->arch.cr2;
3102 sregs->cr3 = vcpu->arch.cr3;
3103 sregs->cr4 = vcpu->arch.cr4;
2d3ad1f4 3104 sregs->cr8 = kvm_get_cr8(vcpu);
ad312c7c 3105 sregs->efer = vcpu->arch.shadow_efer;
3106 sregs->apic_base = kvm_get_apic_base(vcpu);
3107
3108 if (irqchip_in_kernel(vcpu->kvm)) {
3109 memset(sregs->interrupt_bitmap, 0,
3110 sizeof sregs->interrupt_bitmap);
3111 pending_vec = kvm_x86_ops->get_irq(vcpu);
3112 if (pending_vec >= 0)
3113 set_bit(pending_vec,
3114 (unsigned long *)sregs->interrupt_bitmap);
3115 } else
ad312c7c 3116 memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
3117 sizeof sregs->interrupt_bitmap);
3118
3119 vcpu_put(vcpu);
3120
3121 return 0;
3122}
3123
3124int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3125 struct kvm_mp_state *mp_state)
3126{
3127 vcpu_load(vcpu);
3128 mp_state->mp_state = vcpu->arch.mp_state;
3129 vcpu_put(vcpu);
3130 return 0;
3131}
3132
3133int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3134 struct kvm_mp_state *mp_state)
3135{
3136 vcpu_load(vcpu);
3137 vcpu->arch.mp_state = mp_state->mp_state;
3138 vcpu_put(vcpu);
3139 return 0;
3140}
3141
3142static void set_segment(struct kvm_vcpu *vcpu,
3143 struct kvm_segment *var, int seg)
3144{
14af3f3c 3145 kvm_x86_ops->set_segment(vcpu, var, seg);
3146}
3147
3148static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
3149 struct kvm_segment *kvm_desct)
3150{
3151 kvm_desct->base = seg_desc->base0;
3152 kvm_desct->base |= seg_desc->base1 << 16;
3153 kvm_desct->base |= seg_desc->base2 << 24;
3154 kvm_desct->limit = seg_desc->limit0;
3155 kvm_desct->limit |= seg_desc->limit << 16;
3156 kvm_desct->selector = selector;
3157 kvm_desct->type = seg_desc->type;
3158 kvm_desct->present = seg_desc->p;
3159 kvm_desct->dpl = seg_desc->dpl;
3160 kvm_desct->db = seg_desc->d;
3161 kvm_desct->s = seg_desc->s;
3162 kvm_desct->l = seg_desc->l;
3163 kvm_desct->g = seg_desc->g;
3164 kvm_desct->avl = seg_desc->avl;
3165 if (!selector)
3166 kvm_desct->unusable = 1;
3167 else
3168 kvm_desct->unusable = 0;
3169 kvm_desct->padding = 0;
3170}
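
/* Sketch of the field reassembly above: a legacy descriptor scatters the
 * 32-bit base across three fields (16 + 8 + 8 bits) and the 20-bit limit
 * across two; get_tss_base_addr() below does the same for the TSS. */
static unsigned int ex_desc_base(unsigned int base0, unsigned int base1,
				 unsigned int base2)
{
	return base0 | (base1 << 16) | (base2 << 24);
}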
3171
3172static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
3173 u16 selector,
3174 struct descriptor_table *dtable)
3175{
3176 if (selector & 1 << 2) {
3177 struct kvm_segment kvm_seg;
3178
3179 get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
3180
3181 if (kvm_seg.unusable)
3182 dtable->limit = 0;
3183 else
3184 dtable->limit = kvm_seg.limit;
3185 dtable->base = kvm_seg.base;
3186 }
3187 else
3188 kvm_x86_ops->get_gdt(vcpu, dtable);
3189}
3190
3191/* allowed just for 8-byte segment descriptors */
3192static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3193 struct desc_struct *seg_desc)
3194{
3195 struct descriptor_table dtable;
3196 u16 index = selector >> 3;
3197
3198 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3199
3200 if (dtable.limit < index * 8 + 7) {
3201 kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
3202 return 1;
3203 }
3204 return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3205}
3206
3207/* allowed just for 8-byte segment descriptors */
3208static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3209 struct desc_struct *seg_desc)
3210{
3211 struct descriptor_table dtable;
3212 u16 index = selector >> 3;
3213
3214 get_segment_descritptor_dtable(vcpu, selector, &dtable);
3215
3216 if (dtable.limit < index * 8 + 7)
3217 return 1;
3218 return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
3219}
3220
3221static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
3222 struct desc_struct *seg_desc)
3223{
3224 u32 base_addr;
3225
3226 base_addr = seg_desc->base0;
3227 base_addr |= (seg_desc->base1 << 16);
3228 base_addr |= (seg_desc->base2 << 24);
3229
3230 return base_addr;
3231}
3232
3233static int load_tss_segment32(struct kvm_vcpu *vcpu,
3234 struct desc_struct *seg_desc,
3235 struct tss_segment_32 *tss)
3236{
3237 u32 base_addr;
3238
3239 base_addr = get_tss_base_addr(vcpu, seg_desc);
3240
3241 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3242 sizeof(struct tss_segment_32));
3243}
3244
3245static int save_tss_segment32(struct kvm_vcpu *vcpu,
3246 struct desc_struct *seg_desc,
3247 struct tss_segment_32 *tss)
3248{
3249 u32 base_addr;
3250
3251 base_addr = get_tss_base_addr(vcpu, seg_desc);
3252
3253 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3254 sizeof(struct tss_segment_32));
3255}
3256
3257static int load_tss_segment16(struct kvm_vcpu *vcpu,
3258 struct desc_struct *seg_desc,
3259 struct tss_segment_16 *tss)
3260{
3261 u32 base_addr;
3262
3263 base_addr = get_tss_base_addr(vcpu, seg_desc);
3264
3265 return kvm_read_guest(vcpu->kvm, base_addr, tss,
3266 sizeof(struct tss_segment_16));
3267}
3268
3269static int save_tss_segment16(struct kvm_vcpu *vcpu,
3270 struct desc_struct *seg_desc,
3271 struct tss_segment_16 *tss)
3272{
3273 u32 base_addr;
3274
3275 base_addr = get_tss_base_addr(vcpu, seg_desc);
3276
3277 return kvm_write_guest(vcpu->kvm, base_addr, tss,
3278 sizeof(struct tss_segment_16));
3279}
3280
3281static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
3282{
3283 struct kvm_segment kvm_seg;
3284
3285 get_segment(vcpu, &kvm_seg, seg);
3286 return kvm_seg.selector;
3287}
3288
3289static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
3290 u16 selector,
3291 struct kvm_segment *kvm_seg)
3292{
3293 struct desc_struct seg_desc;
3294
3295 if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
3296 return 1;
3297 seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
3298 return 0;
3299}
3300
3301static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
3302 int type_bits, int seg)
3303{
3304 struct kvm_segment kvm_seg;
3305
3306 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
3307 return 1;
3308 kvm_seg.type |= type_bits;
3309
3310 if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
3311 seg != VCPU_SREG_LDTR)
3312 if (!kvm_seg.s)
3313 kvm_seg.unusable = 1;
3314
3315 set_segment(vcpu, &kvm_seg, seg);
3316 return 0;
3317}
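
/* The loads above consult three selector fields; a quick restatement of
 * the layout used by load_guest_segment_descriptor() and friends: */
static void ex_decode_selector(unsigned short sel)
{
	unsigned index = sel >> 3;		/* 8-byte descriptor slot */
	int uses_ldt = !!(sel & (1 << 2));	/* table indicator bit */
	int rpl = sel & 3;			/* requested privilege level */

	(void)index; (void)uses_ldt; (void)rpl;
}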
3318
3319static void save_state_to_tss32(struct kvm_vcpu *vcpu,
3320 struct tss_segment_32 *tss)
3321{
3322 tss->cr3 = vcpu->arch.cr3;
3323 tss->eip = vcpu->arch.rip;
3324 tss->eflags = kvm_x86_ops->get_rflags(vcpu);
3325 tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
3326 tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3327 tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
3328 tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
3329 tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
3330 tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
3331 tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
3332 tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
3333
3334 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3335 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3336 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3337 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3338 tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
3339 tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
3340 tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3341 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3342}
3343
3344static int load_state_from_tss32(struct kvm_vcpu *vcpu,
3345 struct tss_segment_32 *tss)
3346{
3347 kvm_set_cr3(vcpu, tss->cr3);
3348
3349 vcpu->arch.rip = tss->eip;
3350 kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
3351
3352 vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
3353 vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
3354 vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
3355 vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
3356 vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
3357 vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
3358 vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
3359 vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
3360
3361 if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
3362 return 1;
3363
3364 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3365 return 1;
3366
3367 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3368 return 1;
3369
3370 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3371 return 1;
3372
3373 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3374 return 1;
3375
3376 if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
3377 return 1;
3378
3379 if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
3380 return 1;
3381 return 0;
3382}
3383
3384static void save_state_to_tss16(struct kvm_vcpu *vcpu,
3385 struct tss_segment_16 *tss)
3386{
3387 tss->ip = vcpu->arch.rip;
3388 tss->flag = kvm_x86_ops->get_rflags(vcpu);
3389 tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
3390 tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
3391 tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
3392 tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
3393 tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
3394 tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
3395 tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
3396 tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
3397
3398 tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
3399 tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
3400 tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
3401 tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
3402 tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
3403 tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
3404}
3405
3406static int load_state_from_tss16(struct kvm_vcpu *vcpu,
3407 struct tss_segment_16 *tss)
3408{
3409 vcpu->arch.rip = tss->ip;
3410 kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
3411 vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
3412 vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
3413 vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
3414 vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
3415 vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
3416 vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
3417 vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
3418 vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
3419
3420 if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
3421 return 1;
3422
3423 if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
3424 return 1;
3425
3426 if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
3427 return 1;
3428
3429 if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
3430 return 1;
3431
3432 if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
3433 return 1;
3434 return 0;
3435}
3436
8b2cf73c 3437static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
3438 struct desc_struct *cseg_desc,
3439 struct desc_struct *nseg_desc)
3440{
3441 struct tss_segment_16 tss_segment_16;
3442 int ret = 0;
3443
3444 if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
3445 goto out;
3446
3447 save_state_to_tss16(vcpu, &tss_segment_16);
3448 save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
3449
3450 if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
3451 goto out;
3452 if (load_state_from_tss16(vcpu, &tss_segment_16))
3453 goto out;
3454
3455 ret = 1;
3456out:
3457 return ret;
3458}
3459
8b2cf73c 3460static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
3461 struct desc_struct *cseg_desc,
3462 struct desc_struct *nseg_desc)
3463{
3464 struct tss_segment_32 tss_segment_32;
3465 int ret = 0;
3466
3467 if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
3468 goto out;
3469
3470 save_state_to_tss32(vcpu, &tss_segment_32);
3471 save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
3472
3473 if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
3474 goto out;
3475 if (load_state_from_tss32(vcpu, &tss_segment_32))
3476 goto out;
3477
3478 ret = 1;
3479out:
3480 return ret;
3481}
3482
3483int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3484{
3485 struct kvm_segment tr_seg;
3486 struct desc_struct cseg_desc;
3487 struct desc_struct nseg_desc;
3488 int ret = 0;
3489
3490 get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3491
3492 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3493 goto out;
3494
3495 if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
3496 goto out;
3497
3498
3499 if (reason != TASK_SWITCH_IRET) {
3500 int cpl;
3501
3502 cpl = kvm_x86_ops->get_cpl(vcpu);
3503 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3504 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3505 return 1;
3506 }
3507 }
3508
3509 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3510 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3511 return 1;
3512 }
3513
3514 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3fe913e7 3515 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
3516 save_guest_segment_descriptor(vcpu, tr_seg.selector,
3517 &cseg_desc);
3518 }
3519
3520 if (reason == TASK_SWITCH_IRET) {
3521 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3522 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3523 }
3524
3525 kvm_x86_ops->skip_emulated_instruction(vcpu);
3526 kvm_x86_ops->cache_regs(vcpu);
3527
3528 if (nseg_desc.type & 8)
3529 ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
3530 &nseg_desc);
3531 else
3532 ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
3533 &nseg_desc);
3534
3535 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3536 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3537 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3538 }
3539
3540 if (reason != TASK_SWITCH_IRET) {
3fe913e7 3541 nseg_desc.type |= (1 << 1);
3542 save_guest_segment_descriptor(vcpu, tss_selector,
3543 &nseg_desc);
3544 }
3545
3546 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3547 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3548 tr_seg.type = 11;
3549 set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
3550out:
3551 kvm_x86_ops->decache_regs(vcpu);
3552 return ret;
3553}
3554EXPORT_SYMBOL_GPL(kvm_task_switch);
3555
3556int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3557 struct kvm_sregs *sregs)
3558{
3559 int mmu_reset_needed = 0;
3560 int i, pending_vec, max_bits;
3561 struct descriptor_table dt;
3562
3563 vcpu_load(vcpu);
3564
3565 dt.limit = sregs->idt.limit;
3566 dt.base = sregs->idt.base;
3567 kvm_x86_ops->set_idt(vcpu, &dt);
3568 dt.limit = sregs->gdt.limit;
3569 dt.base = sregs->gdt.base;
3570 kvm_x86_ops->set_gdt(vcpu, &dt);
3571
3572 vcpu->arch.cr2 = sregs->cr2;
3573 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3574 vcpu->arch.cr3 = sregs->cr3;
b6c7a5dc 3575
2d3ad1f4 3576 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 3577
ad312c7c 3578 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc 3579 kvm_x86_ops->set_efer(vcpu, sregs->efer);
3580 kvm_set_apic_base(vcpu, sregs->apic_base);
3581
3582 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3583
ad312c7c 3584 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 3585 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 3586 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 3587
ad312c7c 3588 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
3589 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3590 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 3591 load_pdptrs(vcpu, vcpu->arch.cr3);
3592
3593 if (mmu_reset_needed)
3594 kvm_mmu_reset_context(vcpu);
3595
3596 if (!irqchip_in_kernel(vcpu->kvm)) {
3597 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3598 sizeof vcpu->arch.irq_pending);
3599 vcpu->arch.irq_summary = 0;
3600 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3601 if (vcpu->arch.irq_pending[i])
3602 __set_bit(i, &vcpu->arch.irq_summary);
3603 } else {
3604 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3605 pending_vec = find_first_bit(
3606 (const unsigned long *)sregs->interrupt_bitmap,
3607 max_bits);
3608 /* Only pending external irq is handled here */
3609 if (pending_vec < max_bits) {
3610 kvm_x86_ops->set_irq(vcpu, pending_vec);
3611 pr_debug("Set back pending irq %d\n",
3612 pending_vec);
3613 }
3614 }
3615
3616 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3617 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3618 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3619 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3620 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3621 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
3622
3623 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3624 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
3625
3626 vcpu_put(vcpu);
3627
3628 return 0;
3629}
3630
3631int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
3632 struct kvm_debug_guest *dbg)
3633{
3634 int r;
3635
3636 vcpu_load(vcpu);
3637
3638 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3639
3640 vcpu_put(vcpu);
3641
3642 return r;
3643}
3644
3645/*
3646 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
3647 * we have asm/x86/processor.h
3648 */
3649struct fxsave {
3650 u16 cwd;
3651 u16 swd;
3652 u16 twd;
3653 u16 fop;
3654 u64 rip;
3655 u64 rdp;
3656 u32 mxcsr;
3657 u32 mxcsr_mask;
3658 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
3659#ifdef CONFIG_X86_64
3660 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
3661#else
3662 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
3663#endif
3664};
3665
3666/*
3667 * Translate a guest virtual address to a guest physical address.
3668 */
3669int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3670 struct kvm_translation *tr)
3671{
3672 unsigned long vaddr = tr->linear_address;
3673 gpa_t gpa;
3674
3675 vcpu_load(vcpu);
72dc67a6 3676 down_read(&vcpu->kvm->slots_lock);
ad312c7c 3677 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 3678 up_read(&vcpu->kvm->slots_lock);
3679 tr->physical_address = gpa;
3680 tr->valid = gpa != UNMAPPED_GVA;
3681 tr->writeable = 1;
3682 tr->usermode = 0;
3683 vcpu_put(vcpu);
3684
3685 return 0;
3686}
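
/*
 * A hedged user-space sketch of driving the translation above, assuming a
 * vCPU fd; KVM_TRANSLATE fills physical_address and valid exactly as set
 * by this handler.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int ex_translate_gva(int vcpu_fd, unsigned long long gva,
			    unsigned long long *gpa)
{
	struct kvm_translation tr = { .linear_address = gva };

	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0 || !tr.valid)
		return -1;
	*gpa = tr.physical_address;
	return 0;
}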
3687
3688int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3689{
ad312c7c 3690 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3691
3692 vcpu_load(vcpu);
3693
3694 memcpy(fpu->fpr, fxsave->st_space, 128);
3695 fpu->fcw = fxsave->cwd;
3696 fpu->fsw = fxsave->swd;
3697 fpu->ftwx = fxsave->twd;
3698 fpu->last_opcode = fxsave->fop;
3699 fpu->last_ip = fxsave->rip;
3700 fpu->last_dp = fxsave->rdp;
3701 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3702
3703 vcpu_put(vcpu);
3704
3705 return 0;
3706}
3707
3708int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3709{
ad312c7c 3710 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
3711
3712 vcpu_load(vcpu);
3713
3714 memcpy(fxsave->st_space, fpu->fpr, 128);
3715 fxsave->cwd = fpu->fcw;
3716 fxsave->swd = fpu->fsw;
3717 fxsave->twd = fpu->ftwx;
3718 fxsave->fop = fpu->last_opcode;
3719 fxsave->rip = fpu->last_ip;
3720 fxsave->rdp = fpu->last_dp;
3721 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
3722
3723 vcpu_put(vcpu);
3724
3725 return 0;
3726}
3727
3728void fx_init(struct kvm_vcpu *vcpu)
3729{
3730 unsigned after_mxcsr_mask;
3731
3732 /*
3733 * Touch the FPU the first time in a non-atomic context: if this is
3734 * the first FPU instruction, the exception handler will fire before
3735 * the instruction returns, and it will have to allocate RAM with
3736 * GFP_KERNEL.
3737 */
3738 if (!used_math())
3739 fx_save(&vcpu->arch.host_fx_image);
3740
3741 /* Initialize guest FPU by resetting ours and saving into guest's */
3742 preempt_disable();
ad312c7c 3743 fx_save(&vcpu->arch.host_fx_image);
bc1a34f1 3744 fx_finit();
3745 fx_save(&vcpu->arch.guest_fx_image);
3746 fx_restore(&vcpu->arch.host_fx_image);
3747 preempt_enable();
3748
ad312c7c 3749 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 3750 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
3751 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
3752 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
3753 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
3754}
3755EXPORT_SYMBOL_GPL(fx_init);
3756
3757void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
3758{
3759 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
3760 return;
3761
3762 vcpu->guest_fpu_loaded = 1;
3763 fx_save(&vcpu->arch.host_fx_image);
3764 fx_restore(&vcpu->arch.guest_fx_image);
3765}
3766EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
3767
3768void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
3769{
3770 if (!vcpu->guest_fpu_loaded)
3771 return;
3772
3773 vcpu->guest_fpu_loaded = 0;
3774 fx_save(&vcpu->arch.guest_fx_image);
3775 fx_restore(&vcpu->arch.host_fx_image);
f096ed85 3776 ++vcpu->stat.fpu_reload;
3777}
3778EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
3779
3780void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
3781{
3782 kvm_x86_ops->vcpu_free(vcpu);
3783}
3784
3785struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3786 unsigned int id)
3787{
3788 return kvm_x86_ops->vcpu_create(kvm, id);
3789}
e9b11c17 3790
3791int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
3792{
3793 int r;
3794
3795 /* We do fxsave: this must be aligned. */
ad312c7c 3796 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
3797
3798 vcpu_load(vcpu);
3799 r = kvm_arch_vcpu_reset(vcpu);
3800 if (r == 0)
3801 r = kvm_mmu_setup(vcpu);
3802 vcpu_put(vcpu);
3803 if (r < 0)
3804 goto free_vcpu;
3805
26e5215f 3806 return 0;
3807free_vcpu:
3808 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 3809 return r;
3810}
3811
d40ccc62 3812void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
3813{
3814 vcpu_load(vcpu);
3815 kvm_mmu_unload(vcpu);
3816 vcpu_put(vcpu);
3817
3818 kvm_x86_ops->vcpu_free(vcpu);
3819}
3820
3821int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
3822{
3823 return kvm_x86_ops->vcpu_reset(vcpu);
3824}
3825
3826void kvm_arch_hardware_enable(void *garbage)
3827{
3828 kvm_x86_ops->hardware_enable(garbage);
3829}
3830
3831void kvm_arch_hardware_disable(void *garbage)
3832{
3833 kvm_x86_ops->hardware_disable(garbage);
3834}
3835
3836int kvm_arch_hardware_setup(void)
3837{
3838 return kvm_x86_ops->hardware_setup();
3839}
3840
3841void kvm_arch_hardware_unsetup(void)
3842{
3843 kvm_x86_ops->hardware_unsetup();
3844}
3845
3846void kvm_arch_check_processor_compat(void *rtn)
3847{
3848 kvm_x86_ops->check_processor_compatibility(rtn);
3849}
3850
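/*
 * Arch-specific vcpu construction: pick the initial mp_state,
 * allocate the page backing pio_data, and create the mmu and, with
 * an in-kernel irqchip, the local APIC.
 */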
3851int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
3852{
3853 struct page *page;
3854 struct kvm *kvm;
3855 int r;
3856
3857 BUG_ON(vcpu->kvm == NULL);
3858 kvm = vcpu->kvm;
3859
ad312c7c 3860 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
e9b11c17 3861 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
a4535290 3862 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
e9b11c17 3863 else
a4535290 3864 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
e9b11c17
ZX
3865
3866 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
3867 if (!page) {
3868 r = -ENOMEM;
3869 goto fail;
3870 }
ad312c7c 3871 vcpu->arch.pio_data = page_address(page);
e9b11c17
ZX
3872
3873 r = kvm_mmu_create(vcpu);
3874 if (r < 0)
3875 goto fail_free_pio_data;
3876
3877 if (irqchip_in_kernel(kvm)) {
3878 r = kvm_create_lapic(vcpu);
3879 if (r < 0)
3880 goto fail_mmu_destroy;
3881 }
3882
3883 return 0;
3884
3885fail_mmu_destroy:
3886 kvm_mmu_destroy(vcpu);
3887fail_free_pio_data:
ad312c7c 3888 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17
ZX
3889fail:
3890 return r;
3891}
3892
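/*
 * Mirror of kvm_arch_vcpu_init(): free the local APIC, the mmu and
 * the pio_data page.
 */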
3893void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3894{
3895 kvm_free_lapic(vcpu);
3200f405 3896 down_read(&vcpu->kvm->slots_lock);
e9b11c17 3897 kvm_mmu_destroy(vcpu);
3200f405 3898 up_read(&vcpu->kvm->slots_lock);
ad312c7c 3899 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17 3900}
d19a9cd2
ZX
3901
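/* Allocate a VM and initialize its arch state; vcpus and memory
 * slots are attached later. */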
3902struct kvm *kvm_arch_create_vm(void)
3903{
3904 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
3905
3906 if (!kvm)
3907 return ERR_PTR(-ENOMEM);
3908
f05e70ac 3909 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
d19a9cd2
ZX
3910
3911 return kvm;
3912}
3913
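/* Drop a vcpu's mmu pages; the vcpu must be loaded while unloading. */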
3914static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
3915{
3916 vcpu_load(vcpu);
3917 kvm_mmu_unload(vcpu);
3918 vcpu_put(vcpu);
3919}
3920
3921static void kvm_free_vcpus(struct kvm *kvm)
3922{
3923 unsigned int i;
3924
3925 /*
3926 * Unpin any mmu pages first.
3927 */
3928 for (i = 0; i < KVM_MAX_VCPUS; ++i)
3929 if (kvm->vcpus[i])
3930 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
3931 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
3932 if (kvm->vcpus[i]) {
3933 kvm_arch_vcpu_free(kvm->vcpus[i]);
3934 kvm->vcpus[i] = NULL;
3935 }
3936 }
3937
3938}
3939
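/*
 * Tear down a VM: free the in-kernel PIT, PIC and IOAPIC, release the
 * vcpus and physical memory, and drop the pages pinned for APIC
 * access and the EPT identity pagetable.
 */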
3940void kvm_arch_destroy_vm(struct kvm *kvm)
3941{
7837699f 3942 kvm_free_pit(kvm);
d7deeeb0
ZX
3943 kfree(kvm->arch.vpic);
3944 kfree(kvm->arch.vioapic);
d19a9cd2
ZX
3945 kvm_free_vcpus(kvm);
3946 kvm_free_physmem(kvm);
3d45830c
AK
3947 if (kvm->arch.apic_access_page)
3948 put_page(kvm->arch.apic_access_page);
b7ebfb05
SY
3949 if (kvm->arch.ept_identity_pagetable)
3950 put_page(kvm->arch.ept_identity_pagetable);
d19a9cd2
ZX
3951 kfree(kvm);
3952}
0de10343
ZX
3953
3954int kvm_arch_set_memory_region(struct kvm *kvm,
3955 struct kvm_userspace_memory_region *mem,
3956 struct kvm_memory_slot old,
3957 int user_alloc)
3958{
3959 int npages = mem->memory_size >> PAGE_SHIFT;
3960 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
3961
3962 /* To keep backward compatibility with older userspace,
3963 * x86 needs to handle the !user_alloc case.
3964 */
3965 if (!user_alloc) {
3966 if (npages && !old.rmap) {
72dc67a6 3967 down_write(&current->mm->mmap_sem);
0de10343
ZX
3968 memslot->userspace_addr = do_mmap(NULL, 0,
3969 npages * PAGE_SIZE,
3970 PROT_READ | PROT_WRITE,
3971 MAP_SHARED | MAP_ANONYMOUS,
3972 0);
72dc67a6 3973 up_write(&current->mm->mmap_sem);
0de10343
ZX
3974
3975 if (IS_ERR((void *)memslot->userspace_addr))
3976 return PTR_ERR((void *)memslot->userspace_addr);
3977 } else {
3978 if (!old.user_alloc && old.rmap) {
3979 int ret;
3980
72dc67a6 3981 down_write(&current->mm->mmap_sem);
0de10343
ZX
3982 ret = do_munmap(current->mm, old.userspace_addr,
3983 old.npages * PAGE_SIZE);
72dc67a6 3984 up_write(&current->mm->mmap_sem);
0de10343
ZX
3985 if (ret < 0)
3986 printk(KERN_WARNING
3987 "kvm_vm_ioctl_set_memory_region: "
3988 "failed to munmap memory\n");
3989 }
3990 }
3991 }
3992
f05e70ac 3993 if (!kvm->arch.n_requested_mmu_pages) {
0de10343
ZX
3994 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
3995 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
3996 }
3997
3998 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
3999 kvm_flush_remote_tlbs(kvm);
4000
4001 return 0;
4002}
1d737c8a
ZX
4003
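/* A vcpu may enter the guest only when runnable or after a SIPI. */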
4004int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4005{
a4535290
AK
4006 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
4007 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
1d737c8a 4008}
5736199a
ZX
4009
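/*
 * Empty IPI handler: the interrupt itself is what kicks the target
 * cpu out of guest mode; there is nothing to do here beyond the
 * optional debug trace.
 */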
4010static void vcpu_kick_intr(void *info)
4011{
4012#ifdef DEBUG
4013 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
4014 printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
4015#endif
4016}
4017
4018void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4019{
4020 int ipi_pcpu = vcpu->cpu;
e9571ed5 4021 int cpu = get_cpu();
5736199a
ZX
4022
4023 if (waitqueue_active(&vcpu->wq)) {
4024 wake_up_interruptible(&vcpu->wq);
4025 ++vcpu->stat.halt_wakeup;
4026 }
e9571ed5
MT
4027 /*
4028 * We may be called synchronously with irqs disabled in guest mode,
4029 * so there is no need to call smp_call_function_single() in that case.
4030 */
4031 if (vcpu->guest_mode && vcpu->cpu != cpu)
8691e5a8 4032 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
e9571ed5 4033 put_cpu();
5736199a 4034}