KVM: VMX: Use kvm_mmu_page_fault() handle EPT violation mmio
arch/x86/kvm/x86.c  [linux-2.6-block.git]
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/uaccess.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE \
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64 bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
				    struct kvm_cpuid_entry2 __user *entries);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "request_nmi", VCPU_STAT(request_nmi_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "mmu_unsync_global", VM_STAT(mmu_unsync_global) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

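/*
 * Resolve the linear base address of the segment referenced by @selector
 * by walking the host GDT (or, for selectors with the TI bit set, the
 * LDT), including the upper 32 base bits of 64-bit system descriptors.
 */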
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct desc_struct *d;
	unsigned long table_base;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {		/* from ldt */
		u16 ldt_selector;

		asm("sldt %0" : "=g"(ldt_selector));
		table_base = segment_base(ldt_selector);
	}
	d = (struct desc_struct *)(table_base + (selector & ~7));
	v = d->base0 | ((unsigned long)d->base1 << 16) |
		((unsigned long)d->base2 << 24);
#ifdef CONFIG_X86_64
	if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

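/*
 * Exception queueing: at most one exception may be pending for a vcpu
 * at a time (hence the WARN_ON below).  kvm_queue_exception{,_e}()
 * record it here; __queue_exception() later hands it to the vendor
 * module (VMX/SVM) for actual injection into the guest.
 */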
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = false;
	vcpu->arch.exception.nr = nr;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;

	if (vcpu->arch.exception.pending) {
		if (vcpu->arch.exception.nr == PF_VECTOR) {
			printk(KERN_DEBUG "kvm: inject_page_fault:"
			       " double fault 0x%lx\n", addr);
			vcpu->arch.exception.nr = DF_VECTOR;
			vcpu->arch.exception.error_code = 0;
		} else if (vcpu->arch.exception.nr == DF_VECTOR) {
			/* triple fault -> shutdown */
			set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		}
		return;
	}
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	WARN_ON(vcpu->arch.exception.pending);
	vcpu->arch.exception.pending = true;
	vcpu->arch.exception.has_error_code = true;
	vcpu->arch.exception.nr = nr;
	vcpu->arch.exception.error_code = error_code;
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

static void __queue_exception(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
				     vcpu->arch.exception.has_error_code,
				     vcpu->arch.exception.error_code);
}

/*
 * Load the pae pdptrs.  Returns true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:

	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

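/*
 * Re-read the guest's PDPTEs from memory and compare them with the values
 * cached at the last CR3 load; returns true if the guest changed them
 * behind our back (or if they could not be read), so the MMU must reload.
 */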
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:

	return changed;
}

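/*
 * Emulate a guest CR0 write: reject reserved bits and inconsistent
 * CD/NW, PG/PE and long-mode combinations with #GP, then propagate the
 * new value to the vendor module and rebuild the MMU context.
 */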
void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->arch.cr0);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.shadow_efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				kvm_inject_gp(vcpu, 0);
				return;

			}
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->arch.cr0 = cr0;

	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
	return;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
	KVMTRACE_1D(LMSW, vcpu,
		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
		    handler);
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->arch.cr4 = cr4;
	vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
	kvm_mmu_sync_global(vcpu);
	kvm_mmu_reset_context(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

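/*
 * Emulate a guest CR3 write.  If CR3 (and, under PAE, the PDPTEs) is
 * unchanged, a TLB flush plus root resync is sufficient; otherwise
 * validate the new value and switch the MMU to the new root.
 */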
void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				kvm_inject_gp(vcpu, 0);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		kvm_inject_gp(vcpu, 0);
	else {
		vcpu->arch.cr3 = cr3;
		vcpu->arch.mmu.new_cr3(vcpu);
	}
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
		kvm_inject_gp(vcpu, 0);
		return;
	}
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.
 */
static u32 msrs_to_save[] = {
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_K6_STAR,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

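/*
 * Emulate a guest EFER write: #GP on reserved bits, on toggling LME while
 * paging is enabled, and on enabling FFXSR or SVME without the matching
 * guest CPUID capability.  LMA is preserved from the current shadow EFER.
 */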
static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (efer & efer_reserved_bits) {
		printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
		       efer);
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (is_paging(vcpu)
	    && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
		printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
		kvm_inject_gp(vcpu, 0);
		return;
	}

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
			printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
			printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
			kvm_inject_gp(vcpu, 0);
			return;
		}
	}

	kvm_x86_ops->set_efer(vcpu, efer);

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.shadow_efer & EFER_LMA;

	vcpu->arch.shadow_efer = efer;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);


/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

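/*
 * Publish the host boot time as the guest's pvclock wall clock base.
 * The structure is framed by version writes: the guest only trusts a
 * snapshot it read while the version was even and unchanged.
 */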
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	static int version;
	struct pvclock_wall_clock wc;
	struct timespec now, sys, boot;

	if (!wall_clock)
		return;

	version++;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	now = current_kernel_time();
	ktime_get_ts(&sys);
	boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}

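/*
 * Precompute the pvclock scaling pair (tsc_shift, tsc_to_system_mul) so
 * that the guest can convert TSC deltas to nanoseconds as roughly
 * ns = (tsc shifted by tsc_shift) * tsc_to_system_mul >> 32; the loops
 * below keep the intermediate ratio in range for the 32-bit multiplier.
 */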
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace with do_div(), this one calculates
	 * "(dividend << 32) / divisor" */
	__asm__ ( "divl %4"
		  : "=a" (quotient), "=d" (remainder)
		  : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int32_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}

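/*
 * Refresh the vcpu's pvclock page: sample the TSC and host time together
 * (irqs off), then copy hv_clock into the guest page.  The version field
 * is bumped by 2 per update so the guest always observes an even value
 * for a consistent snapshot.
 */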
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;

	if ((!vcpu->time_page))
		return;

	if (unlikely(vcpu->hv_clock_tsc_khz != tsc_khz)) {
		kvm_set_time_scale(tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = tsc_khz;
	}

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
		    &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec);
	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished. Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}

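/*
 * Guest MTRR emulation.  MSRs 0x200..0x2ff hold the variable-range
 * base/mask pairs (two MSRs per range); the fixed-range and default-type
 * registers are stored in vcpu->arch.mtrr_state alongside them.
 */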
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	case 0x2f8:
		return true;
	}
	return false;
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		set_efer(vcpu, data);
		break;
	case MSR_IA32_MC0_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_STATUS:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_MCG_CTL:
		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_write_guest_time(vcpu);
		break;
	}
	default:
		pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);


/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_MC0_MISC+20:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.shadow_efer;
		break;
	case MSR_KVM_WALL_CLOCK:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
		data = vcpu->arch.time;
		break;
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs. All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i;

	vcpu_load(vcpu);

	down_read(&vcpu->kvm->slots_lock);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	up_read(&vcpu->kvm->slots_lock);

	vcpu_put(vcpu);

	return i;
}

/*
 * Read or write a bunch of msrs. Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = vmalloc(size);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	vfree(entries);
out:
	return r;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_HLT:
	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
	case KVM_CAP_SET_TSS_ADDR:
	case KVM_CAP_EXT_CPUID:
	case KVM_CAP_PIT:
	case KVM_CAP_NOP_IO_DELAY:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_REINJECT_CONTROL:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_VAPIC:
		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
		break;
	case KVM_CAP_NR_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_MEMORY_SLOTS;
		break;
	case KVM_CAP_PV_MMU:
		r = !tdp_enabled;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	case KVM_CAP_CLOCKSOURCE:
		r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_GET_MSR_INDEX_LIST: {
		struct kvm_msr_list __user *user_msr_list = argp;
		struct kvm_msr_list msr_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
			goto out;
		n = msr_list.nmsrs;
		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
			goto out;
		r = -E2BIG;
		if (n < num_msrs_to_save)
			goto out;
		r = -EFAULT;
		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
				 num_msrs_to_save * sizeof(u32)))
			goto out;
		if (copy_to_user(user_msr_list->indices
				 + num_msrs_to_save * sizeof(u32),
				 &emulated_msrs,
				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_SUPPORTED_CPUID: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
						      cpuid_arg->entries);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	return r;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	kvm_write_guest_time(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->vcpu_put(vcpu);
	kvm_put_guest_fpu(vcpu);
}

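/*
 * If the host runs with EFER.NX clear, the guest must not advertise the
 * NX bit in CPUID leaf 0x80000001 (bit 20), since shadow page tables
 * could not honor it; cpuid_fix_nx_cap() strips it from the
 * user-supplied entries.
 */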
static int is_efer_nx(void)
{
	u64 efer;

	rdmsrl(MSR_EFER, efer);
	return efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
		entry->edx &= ~(1 << 20);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
				    struct kvm_cpuid *cpuid,
				    struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	return 0;

out:
	return r;
}

static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
				     struct kvm_cpuid2 *cpuid,
				     struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

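/*
 * Build one KVM_GET_SUPPORTED_CPUID entry: run the host CPUID instruction
 * for (function, index) and mask the feature words down to what KVM can
 * actually virtualize, expanding multi-index leaves (2, 4, 0xb) in place.
 */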
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			 u32 index, int *nent, int maxnent)
{
	const u32 kvm_supported_word0_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_SEP) | bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_CLFLSH) | bit(X86_FEATURE_MMX) |
		bit(X86_FEATURE_FXSR) | bit(X86_FEATURE_XMM) |
		bit(X86_FEATURE_XMM2) | bit(X86_FEATURE_SELFSNOOP);
	const u32 kvm_supported_word1_x86_features = bit(X86_FEATURE_FPU) |
		bit(X86_FEATURE_VME) | bit(X86_FEATURE_DE) |
		bit(X86_FEATURE_PSE) | bit(X86_FEATURE_TSC) |
		bit(X86_FEATURE_MSR) | bit(X86_FEATURE_PAE) |
		bit(X86_FEATURE_CX8) | bit(X86_FEATURE_APIC) |
		bit(X86_FEATURE_PGE) |
		bit(X86_FEATURE_CMOV) | bit(X86_FEATURE_PSE36) |
		bit(X86_FEATURE_MMX) | bit(X86_FEATURE_FXSR) |
		bit(X86_FEATURE_SYSCALL) |
		(bit(X86_FEATURE_NX) && is_efer_nx()) |
#ifdef CONFIG_X86_64
		bit(X86_FEATURE_LM) |
#endif
		bit(X86_FEATURE_FXSR_OPT) |
		bit(X86_FEATURE_MMXEXT) |
		bit(X86_FEATURE_3DNOWEXT) |
		bit(X86_FEATURE_3DNOW);
	const u32 kvm_supported_word3_x86_features =
		bit(X86_FEATURE_XMM3) | bit(X86_FEATURE_CX16);
	const u32 kvm_supported_word6_x86_features =
		bit(X86_FEATURE_LAHF_LM) | bit(X86_FEATURE_CMP_LEGACY) |
		bit(X86_FEATURE_SVM);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();
	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xb);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		entry->ecx &= kvm_supported_word3_x86_features;
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times && *nent < maxnent; ++t) {
			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 and 0xb have additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; *nent < maxnent; ++i) {
			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
			       KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		entry->ecx &= kvm_supported_word6_x86_features;
		break;
	}
	put_cpu();
}

static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG;
	u32 func;

	if (cpuid->nent < 1)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
	limit = cpuid_entries[0].eax;
	for (func = 1; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -E2BIG;
	if (nent >= cpuid->nent)
		goto out_free;

	do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
	limit = cpuid_entries[nent - 1].eax;
	for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
		do_cpuid_ent(&cpuid_entries[nent], func, 0,
			     &nent, cpuid->nent);
	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_interrupt *irq)
{
	if (irq->irq < 0 || irq->irq >= 256)
		return -EINVAL;
	if (irqchip_in_kernel(vcpu->kvm))
		return -ENXIO;
	vcpu_load(vcpu);

	set_bit(irq->irq, vcpu->arch.irq_pending);
	set_bit(irq->irq / BITS_PER_LONG, &vcpu->arch.irq_summary);

	vcpu_put(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_inject_nmi(vcpu);
	vcpu_put(vcpu);

	return 0;
}

static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
					   struct kvm_tpr_access_ctl *tac)
{
	if (tac->flags)
		return -EINVAL;
	vcpu->arch.tpr_access_reporting = !!tac->enabled;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_lapic_state *lapic = NULL;

	switch (ioctl) {
	case KVM_GET_LAPIC: {
		lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_LAPIC: {
		lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
		r = -ENOMEM;
		if (!lapic)
			goto out;
		r = -EFAULT;
		if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
			goto out;
		r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;

		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof irq))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_NMI: {
		r = kvm_vcpu_ioctl_nmi(vcpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_CPUID: {
		struct kvm_cpuid __user *cpuid_arg = argp;
		struct kvm_cpuid cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_CPUID2: {
		struct kvm_cpuid2 __user *cpuid_arg = argp;
		struct kvm_cpuid2 cpuid;

		r = -EFAULT;
		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
			goto out;
		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
					      cpuid_arg->entries);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MSRS:
		r = msr_io(vcpu, argp, kvm_get_msr, 1);
		break;
	case KVM_SET_MSRS:
		r = msr_io(vcpu, argp, do_set_msr, 0);
		break;
	case KVM_TPR_ACCESS_REPORTING: {
		struct kvm_tpr_access_ctl tac;

		r = -EFAULT;
		if (copy_from_user(&tac, argp, sizeof tac))
			goto out;
		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tac, sizeof tac))
			goto out;
		r = 0;
		break;
	};
	case KVM_SET_VAPIC_ADDR: {
		struct kvm_vapic_addr va;

		r = -EINVAL;
		if (!irqchip_in_kernel(vcpu->kvm))
			goto out;
		r = -EFAULT;
		if (copy_from_user(&va, argp, sizeof va))
			goto out;
		r = 0;
		kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
		break;
	}
	default:
		r = -EINVAL;
	}
out:
	kfree(lapic);
	return r;
}

static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
	int ret;

	if (addr > (unsigned int)(-3 * PAGE_SIZE))
		return -1;
	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
	return ret;
}

static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
					 u32 kvm_nr_mmu_pages)
{
	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
		return -EINVAL;

	down_write(&kvm->slots_lock);

	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

	up_write(&kvm->slots_lock);
	return 0;
}

static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->arch.n_alloc_mmu_pages;
}

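/*
 * Translate a guest frame number through the VM's memory alias table
 * (used e.g. to remap the VGA window); frames outside any alias are
 * returned unchanged.
 */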
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_mem_alias *alias;

	for (i = 0; i < kvm->arch.naliases; ++i) {
		alias = &kvm->arch.aliases[i];
		if (gfn >= alias->base_gfn
		    && gfn < alias->base_gfn + alias->npages)
			return alias->target_gfn + gfn - alias->base_gfn;
	}
	return gfn;
}

/*
 * Set a new alias region.  Aliases map a portion of physical memory into
 * another portion.  This is useful for memory windows, for example the PC
 * VGA region.
 */
static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
					 struct kvm_memory_alias *alias)
{
	int r, n;
	struct kvm_mem_alias *p;

	r = -EINVAL;
	/* General sanity checks */
	if (alias->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (alias->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (alias->slot >= KVM_ALIAS_SLOTS)
		goto out;
	if (alias->guest_phys_addr + alias->memory_size
	    < alias->guest_phys_addr)
		goto out;
	if (alias->target_phys_addr + alias->memory_size
	    < alias->target_phys_addr)
		goto out;

	down_write(&kvm->slots_lock);
	spin_lock(&kvm->mmu_lock);

	p = &kvm->arch.aliases[alias->slot];
	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
	p->npages = alias->memory_size >> PAGE_SHIFT;
	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;

	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
		if (kvm->arch.aliases[n - 1].npages)
			break;
	kvm->arch.naliases = n;

	spin_unlock(&kvm->mmu_lock);
	kvm_mmu_zap_all(kvm);

	up_write(&kvm->slots_lock);

	return 0;

out:
	return r;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[0],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&chip->chip.pic,
			&pic_irqchip(kvm)->pics[1],
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(&chip->chip.ioapic,
			ioapic_irqchip(kvm),
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_PIC_MASTER:
		memcpy(&pic_irqchip(kvm)->pics[0],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_PIC_SLAVE:
		memcpy(&pic_irqchip(kvm)->pics[1],
			&chip->chip.pic,
			sizeof(struct kvm_pic_state));
		break;
	case KVM_IRQCHIP_IOAPIC:
		memcpy(ioapic_irqchip(kvm),
			&chip->chip.ioapic,
			sizeof(struct kvm_ioapic_state));
		break;
	default:
		r = -EINVAL;
		break;
	}
	kvm_pic_update_irq(pic_irqchip(kvm));
	return r;
}

static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
	return r;
}

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
	int r = 0;

	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
	kvm_pit_load_count(kvm, 0, ps->channels[0].count);
	return r;
}

1743
52d939a0
MT
1744static int kvm_vm_ioctl_reinject(struct kvm *kvm,
1745 struct kvm_reinject_control *control)
1746{
1747 if (!kvm->arch.vpit)
1748 return -ENXIO;
1749 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
1750 return 0;
1751}
1752
5bb064dc
ZX
1753/*
1754 * Get (and clear) the dirty memory log for a memory slot.
1755 */
1756int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1757 struct kvm_dirty_log *log)
1758{
1759 int r;
1760 int n;
1761 struct kvm_memory_slot *memslot;
1762 int is_dirty = 0;
1763
72dc67a6 1764 down_write(&kvm->slots_lock);
5bb064dc
ZX
1765
1766 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1767 if (r)
1768 goto out;
1769
1770 /* If nothing is dirty, don't bother messing with page tables. */
1771 if (is_dirty) {
1772 kvm_mmu_slot_remove_write_access(kvm, log->slot);
1773 kvm_flush_remote_tlbs(kvm);
1774 memslot = &kvm->memslots[log->slot];
1775 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1776 memset(memslot->dirty_bitmap, 0, n);
1777 }
1778 r = 0;
1779out:
72dc67a6 1780 up_write(&kvm->slots_lock);
5bb064dc
ZX
1781 return r;
1782}
1783
1fe779f8
CO
1784long kvm_arch_vm_ioctl(struct file *filp,
1785 unsigned int ioctl, unsigned long arg)
1786{
1787 struct kvm *kvm = filp->private_data;
1788 void __user *argp = (void __user *)arg;
1789 int r = -EINVAL;
f0d66275
DH
1790 /*
1791 * This union makes it completely explicit to gcc-3.x
1792 * that these two variables' stack usage should be
1793 * combined, not added together.
1794 */
1795 union {
1796 struct kvm_pit_state ps;
1797 struct kvm_memory_alias alias;
1798 } u;
1fe779f8
CO
1799
1800 switch (ioctl) {
1801 case KVM_SET_TSS_ADDR:
1802 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
1803 if (r < 0)
1804 goto out;
1805 break;
1806 case KVM_SET_MEMORY_REGION: {
1807 struct kvm_memory_region kvm_mem;
1808 struct kvm_userspace_memory_region kvm_userspace_mem;
1809
1810 r = -EFAULT;
1811 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
1812 goto out;
1813 kvm_userspace_mem.slot = kvm_mem.slot;
1814 kvm_userspace_mem.flags = kvm_mem.flags;
1815 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
1816 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
1817 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
1818 if (r)
1819 goto out;
1820 break;
1821 }
1822 case KVM_SET_NR_MMU_PAGES:
1823 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
1824 if (r)
1825 goto out;
1826 break;
1827 case KVM_GET_NR_MMU_PAGES:
1828 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
1829 break;
f0d66275 1830 case KVM_SET_MEMORY_ALIAS:
1fe779f8 1831 r = -EFAULT;
f0d66275 1832 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
1fe779f8 1833 goto out;
f0d66275 1834 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
1fe779f8
CO
1835 if (r)
1836 goto out;
1837 break;
1fe779f8
CO
1838 case KVM_CREATE_IRQCHIP:
1839 r = -ENOMEM;
d7deeeb0
ZX
1840 kvm->arch.vpic = kvm_create_pic(kvm);
1841 if (kvm->arch.vpic) {
1fe779f8
CO
1842 r = kvm_ioapic_init(kvm);
1843 if (r) {
d7deeeb0
ZX
1844 kfree(kvm->arch.vpic);
1845 kvm->arch.vpic = NULL;
1fe779f8
CO
1846 goto out;
1847 }
1848 } else
1849 goto out;
399ec807
AK
1850 r = kvm_setup_default_irq_routing(kvm);
1851 if (r) {
1852 kfree(kvm->arch.vpic);
1853 kfree(kvm->arch.vioapic);
1854 goto out;
1855 }
1fe779f8 1856 break;
7837699f 1857 case KVM_CREATE_PIT:
269e05e4
AK
1858 mutex_lock(&kvm->lock);
1859 r = -EEXIST;
1860 if (kvm->arch.vpit)
1861 goto create_pit_unlock;
7837699f
SY
1862 r = -ENOMEM;
1863 kvm->arch.vpit = kvm_create_pit(kvm);
1864 if (kvm->arch.vpit)
1865 r = 0;
269e05e4
AK
1866 create_pit_unlock:
1867 mutex_unlock(&kvm->lock);
7837699f 1868 break;
1fe779f8
CO
1869 case KVM_IRQ_LINE: {
1870 struct kvm_irq_level irq_event;
1871
1872 r = -EFAULT;
1873 if (copy_from_user(&irq_event, argp, sizeof irq_event))
1874 goto out;
1875 if (irqchip_in_kernel(kvm)) {
1876 mutex_lock(&kvm->lock);
5550af4d
SY
1877 kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1878 irq_event.irq, irq_event.level);
1fe779f8
CO
1879 mutex_unlock(&kvm->lock);
1880 r = 0;
1881 }
1882 break;
1883 }
1884 case KVM_GET_IRQCHIP: {
1885 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1886 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1887
f0d66275
DH
1888 r = -ENOMEM;
1889 if (!chip)
1fe779f8 1890 goto out;
f0d66275
DH
1891 r = -EFAULT;
1892 if (copy_from_user(chip, argp, sizeof *chip))
1893 goto get_irqchip_out;
1fe779f8
CO
1894 r = -ENXIO;
1895 if (!irqchip_in_kernel(kvm))
f0d66275
DH
1896 goto get_irqchip_out;
1897 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
1fe779f8 1898 if (r)
f0d66275 1899 goto get_irqchip_out;
1fe779f8 1900 r = -EFAULT;
f0d66275
DH
1901 if (copy_to_user(argp, chip, sizeof *chip))
1902 goto get_irqchip_out;
1fe779f8 1903 r = 0;
f0d66275
DH
1904 get_irqchip_out:
1905 kfree(chip);
1906 if (r)
1907 goto out;
1fe779f8
CO
1908 break;
1909 }
1910 case KVM_SET_IRQCHIP: {
1911 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
f0d66275 1912 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
1fe779f8 1913
f0d66275
DH
1914 r = -ENOMEM;
1915 if (!chip)
1fe779f8 1916 goto out;
f0d66275
DH
1917 r = -EFAULT;
1918 if (copy_from_user(chip, argp, sizeof *chip))
1919 goto set_irqchip_out;
1fe779f8
CO
1920 r = -ENXIO;
1921 if (!irqchip_in_kernel(kvm))
f0d66275
DH
1922 goto set_irqchip_out;
1923 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
1fe779f8 1924 if (r)
f0d66275 1925 goto set_irqchip_out;
1fe779f8 1926 r = 0;
f0d66275
DH
1927 set_irqchip_out:
1928 kfree(chip);
1929 if (r)
1930 goto out;
1fe779f8
CO
1931 break;
1932 }
e0f63cb9 1933 case KVM_GET_PIT: {
e0f63cb9 1934 r = -EFAULT;
f0d66275 1935 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
1936 goto out;
1937 r = -ENXIO;
1938 if (!kvm->arch.vpit)
1939 goto out;
f0d66275 1940 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
e0f63cb9
SY
1941 if (r)
1942 goto out;
1943 r = -EFAULT;
f0d66275 1944 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
e0f63cb9
SY
1945 goto out;
1946 r = 0;
1947 break;
1948 }
1949 case KVM_SET_PIT: {
e0f63cb9 1950 r = -EFAULT;
f0d66275 1951 if (copy_from_user(&u.ps, argp, sizeof u.ps))
e0f63cb9
SY
1952 goto out;
1953 r = -ENXIO;
1954 if (!kvm->arch.vpit)
1955 goto out;
f0d66275 1956 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
e0f63cb9
SY
1957 if (r)
1958 goto out;
1959 r = 0;
1960 break;
1961 }
52d939a0
MT
1962 case KVM_REINJECT_CONTROL: {
1963 struct kvm_reinject_control control;
1964 r = -EFAULT;
1965 if (copy_from_user(&control, argp, sizeof(control)))
1966 goto out;
1967 r = kvm_vm_ioctl_reinject(kvm, &control);
1968 if (r)
1969 goto out;
1970 r = 0;
1971 break;
1972 }
1fe779f8
CO
1973 default:
1974 ;
1975 }
1976out:
1977 return r;
1978}
1979
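/*
 * Probe the host for each MSR in msrs_to_save and compact the list in
 * place: rdmsr_safe() returns a negative value for MSRs the hardware
 * does not implement, and those entries are dropped.
 */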
static void kvm_init_msr_list(void)
{
	u32 dummy[2];
	unsigned i, j;

	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
			continue;
		if (j < i)
			msrs_to_save[j] = msrs_to_save[i];
		j++;
	}
	num_msrs_to_save = j;
}

/*
 * Only the APIC needs a per-vcpu MMIO device hook, so take a shortcut here.
 */
static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
						   gpa_t addr, int len,
						   int is_write)
{
	struct kvm_io_device *dev;

	if (vcpu->arch.apic) {
		dev = &vcpu->arch.apic->dev;
		if (dev->in_range(dev, addr, len, is_write))
			return dev;
	}
	return NULL;
}


static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
						gpa_t addr, int len,
						int is_write)
{
	struct kvm_io_device *dev;

	dev = vcpu_find_pervcpu_dev(vcpu, addr, len, is_write);
	if (dev == NULL)
		dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len,
					  is_write);
	return dev;
}

int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
			struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= toread;
		data += toread;
		addr += toread;
	}
out:
	return r;
}

int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
			 struct kvm_vcpu *vcpu)
{
	void *data = val;
	int r = X86EMUL_CONTINUE;

	while (bytes) {
		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
		unsigned offset = addr & (PAGE_SIZE-1);
		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
		int ret;

		if (gpa == UNMAPPED_GVA) {
			r = X86EMUL_PROPAGATE_FAULT;
			goto out;
		}
		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
		if (ret < 0) {
			r = X86EMUL_UNHANDLEABLE;
			goto out;
		}

		bytes -= towrite;
		data += towrite;
		addr += towrite;
	}
out:
	return r;
}

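/*
 * Emulated guest reads: a previously completed MMIO read is consumed
 * first; otherwise the GVA is translated and the access is either
 * satisfied from ordinary guest memory, handled by an in-kernel MMIO
 * device, or handed to userspace via an MMIO exit.  Accesses to the
 * APIC base page are always treated as MMIO.
 */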
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	}

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (kvm_read_guest_virt(addr, val, bytes, vcpu)
			== X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 0);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes)
{
	int ret;

	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
	if (ret < 0)
		return 0;
	kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
	return 1;
}

static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		kvm_inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	/* For APIC access vmexit */
	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
		goto mmio;

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

mmio:
	/*
	 * Is this MMIO handled locally?
	 */
	mutex_lock(&vcpu->kvm->lock);
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa, bytes, 1);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		mutex_unlock(&vcpu->kvm->lock);
		return X86EMUL_CONTINUE;
	}
	mutex_unlock(&vcpu->kvm->lock);

	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}

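/*
 * A write that straddles a page boundary is split into two single-page
 * writes.  "-addr & ~PAGE_MASK" is the number of bytes up to the next
 * page boundary: for addr == 0x1ffd it yields 3, so a 6-byte write
 * becomes 3 bytes to the first page and 3 bytes to the next.
 */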
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);

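/*
 * cmpxchg is emulated as a plain write (announced once via printk).
 * The exception is cmpxchg8b on 32-bit hosts, which must be atomic:
 * the guest page is mapped and the new value stored with set_64bit()
 * instead.
 */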
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
#ifndef CONFIG_X86_64
	/* the guest's cmpxchg8b has to be emulated atomically */
	if (bytes == 8) {
		gpa_t gpa;
		struct page *page;
		char *kaddr;
		u64 val;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);

		if (gpa == UNMAPPED_GVA ||
		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
			goto emul_write;

		if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
			goto emul_write;

		val = *(u64 *)new;

		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

		kaddr = kmap_atomic(page, KM_USER0);
		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
		kunmap_atomic(kaddr, KM_USER0);
		kvm_release_page_dirty(page);
	}
emul_write:
#endif

	return emulator_write_emulated(addr, new, bytes, vcpu);
}

static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	kvm_mmu_invlpg(vcpu, address);
	return X86EMUL_CONTINUE;
}

int emulate_clts(struct kvm_vcpu *vcpu)
{
	KVMTRACE_0D(CLTS, vcpu, handler);
	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}

int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
	struct kvm_vcpu *vcpu = ctxt->vcpu;

	switch (dr) {
	case 0 ... 3:
		*dest = kvm_x86_ops->get_dr(vcpu, dr);
		return X86EMUL_CONTINUE;
	default:
		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
		return X86EMUL_UNHANDLEABLE;
	}
}

int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}

void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
{
	u8 opcodes[4];
	unsigned long rip = kvm_rip_read(vcpu);
	unsigned long rip_linear;

	if (!printk_ratelimit())
		return;

	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);

	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);

	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
}
EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);

static struct x86_emulate_ops emulate_ops = {
	.read_std = kvm_read_guest_virt,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};

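/*
 * The emulator still accesses vcpu->arch.regs directly (see the TODO
 * below), so pull every register into the cache and mark them all
 * dirty before emulation starts.
 */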
static void cache_all_regs(struct kvm_vcpu *vcpu)
{
	kvm_register_read(vcpu, VCPU_REGS_RAX);
	kvm_register_read(vcpu, VCPU_REGS_RSP);
	kvm_register_read(vcpu, VCPU_REGS_RIP);
	vcpu->arch.regs_dirty = ~0;
}

int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int emulation_type)
{
	int r;
	struct decode_cache *c;

	kvm_clear_exception_queue(vcpu);
	vcpu->arch.mmio_fault_cr2 = cr2;
	/*
	 * TODO: fix x86_emulate.c to use guest_read/write_register
	 * instead of direct ->regs accesses; that could save hundreds
	 * of cycles on Intel for instructions that don't read/change
	 * RSP, for example.
	 */
	cache_all_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->arch.pio.string = 0;

	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->arch.emulate_ctxt.vcpu = vcpu;
		vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->arch.emulate_ctxt.mode =
			(vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 : cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

		/* Reject instructions other than VMCALL/VMMCALL when
		 * trying to emulate an invalid opcode */
		c = &vcpu->arch.emulate_ctxt.decode;
		if ((emulation_type & EMULTYPE_TRAP_UD) &&
		    (!(c->twobyte && c->b == 0x01 &&
		      (c->modrm_reg == 0 || c->modrm_reg == 3) &&
		       c->modrm_mod == 3 && c->modrm_rm == 1)))
			return EMULATE_FAIL;

		++vcpu->stat.insn_emulation;
		if (r) {
			++vcpu->stat.insn_emulation_fail;
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);

	if (vcpu->arch.pio.string)
		return EMULATE_DO_MMIO;

	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);

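/*
 * Copy in-flight PIO data between the per-vcpu pio_data page and the
 * guest-virtual buffer: for IN instructions the data is written out to
 * guest memory, for OUT instructions it is read from guest memory.
 */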
static int pio_copy_data(struct kvm_vcpu *vcpu)
{
	void *p = vcpu->arch.pio_data;
	gva_t q = vcpu->arch.pio.guest_gva;
	unsigned bytes;
	int ret;

	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
	if (vcpu->arch.pio.in)
		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
	else
		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
	return ret;
}

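/*
 * Finish an emulated I/O instruction once the data is available.  For
 * non-string IN, the result lands in RAX.  For string I/O, RCX is
 * decremented by the completed iteration count when REP is in effect,
 * and RSI/RDI advance (or move backwards for "down", i.e. EFLAGS.DF=1)
 * by count * size.
 */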
int complete_pio(struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	long delta;
	int r;
	unsigned long val;

	if (!io->string) {
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RAX);
			memcpy(&val, vcpu->arch.pio_data, io->size);
			kvm_register_write(vcpu, VCPU_REGS_RAX, val);
		}
	} else {
		if (io->in) {
			r = pio_copy_data(vcpu);
			if (r)
				return r;
		}

		delta = 1;
		if (io->rep) {
			delta *= io->cur_count;
			/*
			 * The size of the register should really depend on
			 * current address size.
			 */
			val = kvm_register_read(vcpu, VCPU_REGS_RCX);
			val -= delta;
			kvm_register_write(vcpu, VCPU_REGS_RCX, val);
		}
		if (io->down)
			delta = -delta;
		delta *= io->size;
		if (io->in) {
			val = kvm_register_read(vcpu, VCPU_REGS_RDI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RDI, val);
		} else {
			val = kvm_register_read(vcpu, VCPU_REGS_RSI);
			val += delta;
			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
		}
	}

	io->count -= io->cur_count;
	io->cur_count = 0;

	return 0;
}

static void kernel_pio(struct kvm_io_device *pio_dev,
		       struct kvm_vcpu *vcpu,
		       void *pd)
{
	/* TODO: String I/O for in kernel device */

	mutex_lock(&vcpu->kvm->lock);
	if (vcpu->arch.pio.in)
		kvm_iodevice_read(pio_dev, vcpu->arch.pio.port,
				  vcpu->arch.pio.size,
				  pd);
	else
		kvm_iodevice_write(pio_dev, vcpu->arch.pio.port,
				   vcpu->arch.pio.size,
				   pd);
	mutex_unlock(&vcpu->kvm->lock);
}

static void pio_string_write(struct kvm_io_device *pio_dev,
			     struct kvm_vcpu *vcpu)
{
	struct kvm_pio_request *io = &vcpu->arch.pio;
	void *pd = vcpu->arch.pio_data;
	int i;

	mutex_lock(&vcpu->kvm->lock);
	for (i = 0; i < io->cur_count; i++) {
		kvm_iodevice_write(pio_dev, io->port,
				   io->size,
				   pd);
		pd += io->size;
	}
	mutex_unlock(&vcpu->kvm->lock);
}

static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr, int len,
					       int is_write)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr, len, is_write);
}

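/*
 * Emulate a single (non-string) port I/O operation.  The kvm_run exit
 * record is always filled in; if an in-kernel device claims the port
 * the access completes here and 1 is returned, otherwise 0 is returned
 * and the exit is forwarded to userspace.
 */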
int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		    int size, unsigned port)
{
	struct kvm_io_device *pio_dev;
	unsigned long val;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = 1;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 0;
	vcpu->arch.pio.down = 0;
	vcpu->arch.pio.rep = 0;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
	memcpy(vcpu->arch.pio_data, &val, 4);

	pio_dev = vcpu_find_pio_dev(vcpu, port, size, !in);
	if (pio_dev) {
		kernel_pio(pio_dev, vcpu, vcpu->arch.pio_data);
		complete_pio(vcpu);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio);

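/*
 * String variant of the above.  The transfer is clamped so that at
 * most one page of data moves per exit; "down" (EFLAGS.DF=1) string
 * I/O is not implemented and injects #GP into the guest.  String
 * writes to an in-kernel device may complete without leaving the
 * kernel.
 */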
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
			   int size, unsigned long count, int down,
			   gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int ret = 0;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->arch.pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->arch.pio.count = vcpu->arch.pio.cur_count = count;
	vcpu->run->io.port = vcpu->arch.pio.port = port;
	vcpu->arch.pio.in = in;
	vcpu->arch.pio.string = 1;
	vcpu->arch.pio.down = down;
	vcpu->arch.pio.rep = rep;

	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
			    handler);
	else
		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
			    handler);

	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now)
		now = 1;
	if (down) {
		/*
		 * String I/O in reverse.  Yuck.  Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->arch.pio.cur_count = now;

	if (vcpu->arch.pio.cur_count == vcpu->arch.pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	vcpu->arch.pio.guest_gva = address;

	pio_dev = vcpu_find_pio_dev(vcpu, port,
				    vcpu->arch.pio.cur_count,
				    !vcpu->arch.pio.in);
	if (!vcpu->arch.pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret == X86EMUL_PROPAGATE_FAULT) {
			kvm_inject_gp(vcpu, 0);
			return 1;
		}
		if (ret == 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->arch.pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			  "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;

	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		r = -EEXIST;
		goto out;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		r = -EOPNOTSUPP;
		goto out;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	r = kvm_mmu_module_init();
	if (r)
		goto out;

	kvm_init_msr_list();

	kvm_x86_ops = ops;
	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
			PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
	return 0;

out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_x86_ops = NULL;
	kvm_mmu_module_exit();
}

int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.halt_exits;
	KVMTRACE_0D(HLT, vcpu, handler);
	if (irqchip_in_kernel(vcpu->kvm)) {
		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		return 1;
	} else {
		vcpu->run->exit_reason = KVM_EXIT_HLT;
		return 0;
	}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}

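/*
 * Hypercall ABI: the number arrives in RAX and the arguments in RBX,
 * RCX, RDX and RSI; the return value is passed back in RAX.  For a
 * guest outside long mode, number and arguments are truncated to
 * 32 bits.
 */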
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;
	int r = 1;

	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);

	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);

	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
	++vcpu->stat.hypercalls;
	return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

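/*
 * Patch the hypercall instruction at the guest's RIP with the opcode
 * sequence appropriate for the host vendor (VMCALL/VMMCALL).
 */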
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;
	unsigned long rip = kvm_rip_read(vcpu);

	/*
	 * Blow out the MMU so that no other VCPU keeps an active mapping,
	 * ensuring that the updated hypercall appears atomically across
	 * all VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	return ret;
}

static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}

void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}

void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}

void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	kvm_lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	unsigned long value;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		value = vcpu->arch.cr0;
		break;
	case 2:
		value = vcpu->arch.cr2;
		break;
	case 3:
		value = vcpu->arch.cr3;
		break;
	case 4:
		value = vcpu->arch.cr4;
		break;
	case 8:
		value = kvm_get_cr8(vcpu);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
		return 0;
	}
	KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
		    (u32)((u64)value >> 32), handler);

	return value;
}

void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
		    (u32)((u64)val >> 32), handler);

	switch (cr) {
	case 0:
		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->arch.cr2 = val;
		break;
	case 3:
		kvm_set_cr3(vcpu, val);
		break;
	case 4:
		kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
		break;
	case 8:
		kvm_set_cr8(vcpu, val & 0xfUL);
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
	}
}

static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

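/*
 * Look up a CPUID leaf: exact matches on function (and index, where
 * the index is significant) win, with stateful leaves rotated via the
 * STATE_READ_NEXT flag.  Failing an exact match, the highest leaf in
 * the same class (basic vs. extended) is used as a fallback.
 */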
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
		/*
		 * Both basic or both extended?
		 */
		if (((e->function ^ function) & 0x80000000) == 0)
			if (!best || e->function > best->function)
				best = e;
	}
	return best;
}

void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, index;
	struct kvm_cpuid_entry2 *best;

	function = kvm_register_read(vcpu, VCPU_REGS_RAX);
	index = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
	kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
	best = kvm_find_cpuid_entry(vcpu, function, index);
	if (best) {
		kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
		kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
		kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
	}
	kvm_x86_ops->skip_emulated_instruction(vcpu);
	KVMTRACE_5D(CPUID, vcpu, function,
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);

/*
 * Check if userspace requested an interrupt window, and that the
 * interrupt window is open.
 *
 * No need to exit to userspace if we already have an interrupt queued.
 */
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
					struct kvm_run *kvm_run)
{
	return (!vcpu->arch.irq_summary &&
		kvm_run->request_interrupt_window &&
		vcpu->arch.interrupt_window_open &&
		(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
}

static void post_kvm_run_save(struct kvm_vcpu *vcpu,
			      struct kvm_run *kvm_run)
{
	kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
	kvm_run->cr8 = kvm_get_cr8(vcpu);
	kvm_run->apic_base = kvm_get_apic_base(vcpu);
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_run->ready_for_interrupt_injection = 1;
	else
		kvm_run->ready_for_interrupt_injection =
			(vcpu->arch.interrupt_window_open &&
			 vcpu->arch.irq_summary == 0);
}

static void vapic_enter(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct page *page;

	if (!apic || !apic->vapic_addr)
		return;

	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);

	vcpu->arch.apic->vapic_page = page;
}

static void vapic_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!apic || !apic->vapic_addr)
		return;

	down_read(&vcpu->kvm->slots_lock);
	kvm_release_page_dirty(apic->vapic_page);
	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
	up_read(&vcpu->kvm->slots_lock);
}

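/*
 * One guest entry/exit cycle: process pending request bits, inject any
 * queued exception or interrupt, switch debug registers if needed, run
 * the guest with interrupts disabled, then sync APIC state back and
 * dispatch the exit reason.
 */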
static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_mmu_unload(vcpu);

	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	if (vcpu->requests) {
		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
			__kvm_migrate_timers(vcpu);
		if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
			kvm_mmu_sync_roots(vcpu);
		if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
		if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
			r = 0;
			goto out;
		}
	}

	clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
	kvm_inject_pending_timer_irqs(vcpu);

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	if (vcpu->requests || need_resched() || signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = 1;
		goto out;
	}

	vcpu->guest_mode = 1;
	/*
	 * Make sure that guest_mode assignment won't happen after
	 * testing the pending IRQ vector bitmap.
	 */
	smp_wmb();

	if (vcpu->arch.exception.pending)
		__queue_exception(vcpu);
	else if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	kvm_lapic_sync_to_vapic(vcpu);

	up_read(&vcpu->kvm->slots_lock);

	kvm_guest_enter();

	get_debugreg(vcpu->arch.host_dr6, 6);
	get_debugreg(vcpu->arch.host_dr7, 7);
	if (unlikely(vcpu->arch.switch_db_regs)) {
		get_debugreg(vcpu->arch.host_db[0], 0);
		get_debugreg(vcpu->arch.host_db[1], 1);
		get_debugreg(vcpu->arch.host_db[2], 2);
		get_debugreg(vcpu->arch.host_db[3], 3);

		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.eff_db[0], 0);
		set_debugreg(vcpu->arch.eff_db[1], 1);
		set_debugreg(vcpu->arch.eff_db[2], 2);
		set_debugreg(vcpu->arch.eff_db[3], 3);
	}

	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
	kvm_x86_ops->run(vcpu, kvm_run);

	if (unlikely(vcpu->arch.switch_db_regs)) {
		set_debugreg(0, 7);
		set_debugreg(vcpu->arch.host_db[0], 0);
		set_debugreg(vcpu->arch.host_db[1], 1);
		set_debugreg(vcpu->arch.host_db[2], 2);
		set_debugreg(vcpu->arch.host_db[3], 3);
	}
	set_debugreg(vcpu->arch.host_dr6, 6);
	set_debugreg(vcpu->arch.host_dr7, 7);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do
	 * nicely.  But we need to prevent reordering, hence this
	 * barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	down_read(&vcpu->kvm->slots_lock);

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		unsigned long rip = kvm_rip_read(vcpu);
		profile_hit(KVM_PROFILING, (void *)rip);
	}

	if (vcpu->arch.exception.pending && kvm_x86_ops->exception_injected(vcpu))
		vcpu->arch.exception.pending = false;

	kvm_lapic_sync_from_vapic(vcpu);

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);
out:
	return r;
}

static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
		pr_debug("vcpu %d received sipi with vector # %x\n",
			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
		kvm_lapic_reset(vcpu);
		r = kvm_arch_vcpu_reset(vcpu);
		if (r)
			return r;
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	}

	down_read(&vcpu->kvm->slots_lock);
	vapic_enter(vcpu);

	r = 1;
	while (r > 0) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
			r = vcpu_enter_guest(vcpu, kvm_run);
		else {
			up_read(&vcpu->kvm->slots_lock);
			kvm_vcpu_block(vcpu);
			down_read(&vcpu->kvm->slots_lock);
			if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
				if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
					vcpu->arch.mp_state =
						KVM_MP_STATE_RUNNABLE;
			if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
				r = -EINTR;
		}

		if (r > 0) {
			if (dm_request_for_irq_injection(vcpu, kvm_run)) {
				r = -EINTR;
				kvm_run->exit_reason = KVM_EXIT_INTR;
				++vcpu->stat.request_irq_exits;
			}
			if (signal_pending(current)) {
				r = -EINTR;
				kvm_run->exit_reason = KVM_EXIT_INTR;
				++vcpu->stat.signal_exits;
			}
			if (need_resched()) {
				up_read(&vcpu->kvm->slots_lock);
				kvm_resched(vcpu);
				down_read(&vcpu->kvm->slots_lock);
			}
		}
	}

	up_read(&vcpu->kvm->slots_lock);
	post_kvm_run_save(vcpu, kvm_run);

	vapic_exit(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	/* re-sync apic's tpr */
	if (!irqchip_in_kernel(vcpu->kvm))
		kvm_set_cr8(vcpu, kvm_run->cr8);

	if (vcpu->arch.pio.cur_count) {
		r = complete_pio(vcpu);
		if (r)
			goto out;
	}
#ifdef CONFIG_HAS_IOMEM
	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;

		down_read(&vcpu->kvm->slots_lock);
		r = emulate_instruction(vcpu, kvm_run,
					vcpu->arch.mmio_fault_cr2, 0,
					EMULTYPE_NO_DECODE);
		up_read(&vcpu->kvm->slots_lock);
		if (r == EMULATE_DO_MMIO) {
			/*
			 * Read-modify-write.  Back to userspace.
			 */
			r = 0;
			goto out;
		}
	}
#endif
	if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
		kvm_register_write(vcpu, VCPU_REGS_RAX,
				   kvm_run->hypercall.ret);

	r = __vcpu_run(vcpu, kvm_run);

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif

	regs->rip = kvm_rip_read(vcpu);
	regs->rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * Don't leak debug flags in case they were set for guest debugging
	 */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);

	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif

	kvm_rip_write(vcpu, regs->rip);
	kvm_x86_ops->set_rflags(vcpu, regs->rflags);

	vcpu->arch.exception.pending = false;

	vcpu_put(vcpu);

	return 0;
}

void kvm_get_segment(struct kvm_vcpu *vcpu,
		     struct kvm_segment *var, int seg)
{
	kvm_x86_ops->get_segment(vcpu, var, seg);
}

void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
	struct kvm_segment cs;

	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
	*db = cs.db;
	*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct descriptor_table dt;
	int pending_vec;

	vcpu_load(vcpu);

	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	kvm_x86_ops->get_idt(vcpu, &dt);
	sregs->idt.limit = dt.limit;
	sregs->idt.base = dt.base;
	kvm_x86_ops->get_gdt(vcpu, &dt);
	sregs->gdt.limit = dt.limit;
	sregs->gdt.base = dt.base;

	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	sregs->cr0 = vcpu->arch.cr0;
	sregs->cr2 = vcpu->arch.cr2;
	sregs->cr3 = vcpu->arch.cr3;
	sregs->cr4 = vcpu->arch.cr4;
	sregs->cr8 = kvm_get_cr8(vcpu);
	sregs->efer = vcpu->arch.shadow_efer;
	sregs->apic_base = kvm_get_apic_base(vcpu);

	if (irqchip_in_kernel(vcpu->kvm)) {
		memset(sregs->interrupt_bitmap, 0,
		       sizeof sregs->interrupt_bitmap);
		pending_vec = kvm_x86_ops->get_irq(vcpu);
		if (pending_vec >= 0)
			set_bit(pending_vec,
				(unsigned long *)sregs->interrupt_bitmap);
	} else
		memcpy(sregs->interrupt_bitmap, vcpu->arch.irq_pending,
		       sizeof sregs->interrupt_bitmap);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	vcpu_put(vcpu);
	return 0;
}

static void kvm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	kvm_x86_ops->set_segment(vcpu, var, seg);
}

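/*
 * Convert an in-memory segment descriptor to the kvm_segment layout.
 * When the granularity bit is set the limit is in 4K units, hence the
 * "(limit << 12) | 0xfff" scaling below.
 */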
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
				   struct kvm_segment *kvm_desct)
{
	kvm_desct->base = seg_desc->base0;
	kvm_desct->base |= seg_desc->base1 << 16;
	kvm_desct->base |= seg_desc->base2 << 24;
	kvm_desct->limit = seg_desc->limit0;
	kvm_desct->limit |= seg_desc->limit << 16;
	if (seg_desc->g) {
		kvm_desct->limit <<= 12;
		kvm_desct->limit |= 0xfff;
	}
	kvm_desct->selector = selector;
	kvm_desct->type = seg_desc->type;
	kvm_desct->present = seg_desc->p;
	kvm_desct->dpl = seg_desc->dpl;
	kvm_desct->db = seg_desc->d;
	kvm_desct->s = seg_desc->s;
	kvm_desct->l = seg_desc->l;
	kvm_desct->g = seg_desc->g;
	kvm_desct->avl = seg_desc->avl;
	if (!selector)
		kvm_desct->unusable = 1;
	else
		kvm_desct->unusable = 0;
	kvm_desct->padding = 0;
}

static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
					  u16 selector,
					  struct descriptor_table *dtable)
{
	if (selector & 1 << 2) {
		struct kvm_segment kvm_seg;

		kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);

		if (kvm_seg.unusable)
			dtable->limit = 0;
		else
			dtable->limit = kvm_seg.limit;
		dtable->base = kvm_seg.base;
	}
	else
		kvm_x86_ops->get_gdt(vcpu, dtable);
}

/* allowed only for 8-byte segment descriptors */
static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7) {
		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
		return 1;
	}
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
}

/* allowed only for 8-byte segment descriptors */
static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
					 struct desc_struct *seg_desc)
{
	gpa_t gpa;
	struct descriptor_table dtable;
	u16 index = selector >> 3;

	get_segment_descriptor_dtable(vcpu, selector, &dtable);

	if (dtable.limit < index * 8 + 7)
		return 1;
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
	gpa += index * 8;
	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
}

static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
			     struct desc_struct *seg_desc)
{
	u32 base_addr;

	base_addr = seg_desc->base0;
	base_addr |= (seg_desc->base1 << 16);
	base_addr |= (seg_desc->base2 << 24);

	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
}

static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_segment kvm_seg;

	kvm_get_segment(vcpu, &kvm_seg, seg);
	return kvm_seg.selector;
}

static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
						u16 selector,
						struct kvm_segment *kvm_seg)
{
	struct desc_struct seg_desc;

	if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
		return 1;
	seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
	return 0;
}

static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
{
	struct kvm_segment segvar = {
		.base = selector << 4,
		.limit = 0xffff,
		.selector = selector,
		.type = 3,
		.present = 1,
		.dpl = 3,
		.db = 0,
		.s = 1,
		.l = 0,
		.g = 0,
		.avl = 0,
		.unusable = 0,
	};
	kvm_x86_ops->set_segment(vcpu, &segvar, seg);
	return 0;
}

int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
				int type_bits, int seg)
{
	struct kvm_segment kvm_seg;

	if (!(vcpu->arch.cr0 & X86_CR0_PE))
		return kvm_load_realmode_segment(vcpu, selector, seg);
	if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
		return 1;
	kvm_seg.type |= type_bits;

	if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
	    seg != VCPU_SREG_LDTR)
		if (!kvm_seg.s)
			kvm_seg.unusable = 1;

	kvm_set_segment(vcpu, &kvm_seg, seg);
	return 0;
}

static void save_state_to_tss32(struct kvm_vcpu *vcpu,
				struct tss_segment_32 *tss)
{
	tss->cr3 = vcpu->arch.cr3;
	tss->eip = kvm_rip_read(vcpu);
	tss->eflags = kvm_x86_ops->get_rflags(vcpu);
	tss->eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->edx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->ebx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->esp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->ebp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->esi = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->edi = kvm_register_read(vcpu, VCPU_REGS_RDI);
	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
	tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
	tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss32(struct kvm_vcpu *vcpu,
				 struct tss_segment_32 *tss)
{
	kvm_set_cr3(vcpu, tss->cr3);

	kvm_rip_write(vcpu, tss->eip);
	kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);

	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->eax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->edx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->ebx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->esp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->ebp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
		return 1;
	return 0;
}

static void save_state_to_tss16(struct kvm_vcpu *vcpu,
				struct tss_segment_16 *tss)
{
	tss->ip = kvm_rip_read(vcpu);
	tss->flag = kvm_x86_ops->get_rflags(vcpu);
	tss->ax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	tss->cx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	tss->dx = kvm_register_read(vcpu, VCPU_REGS_RDX);
	tss->bx = kvm_register_read(vcpu, VCPU_REGS_RBX);
	tss->sp = kvm_register_read(vcpu, VCPU_REGS_RSP);
	tss->bp = kvm_register_read(vcpu, VCPU_REGS_RBP);
	tss->si = kvm_register_read(vcpu, VCPU_REGS_RSI);
	tss->di = kvm_register_read(vcpu, VCPU_REGS_RDI);

	tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
	tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
	tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
	tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
	tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
	tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
}

static int load_state_from_tss16(struct kvm_vcpu *vcpu,
				 struct tss_segment_16 *tss)
{
	kvm_rip_write(vcpu, tss->ip);
	kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
	kvm_register_write(vcpu, VCPU_REGS_RAX, tss->ax);
	kvm_register_write(vcpu, VCPU_REGS_RCX, tss->cx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, tss->dx);
	kvm_register_write(vcpu, VCPU_REGS_RBX, tss->bx);
	kvm_register_write(vcpu, VCPU_REGS_RSP, tss->sp);
	kvm_register_write(vcpu, VCPU_REGS_RBP, tss->bp);
	kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
	kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);

	if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
		return 1;

	if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
		return 1;
	return 0;
}

8b2cf73c 3686static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
34198bf8 3687 u32 old_tss_base,
37817f29
IE
3688 struct desc_struct *nseg_desc)
3689{
3690 struct tss_segment_16 tss_segment_16;
3691 int ret = 0;
3692
34198bf8
MT
3693 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3694 sizeof tss_segment_16))
37817f29
IE
3695 goto out;
3696
3697 save_state_to_tss16(vcpu, &tss_segment_16);
37817f29 3698
34198bf8
MT
3699 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
3700 sizeof tss_segment_16))
37817f29 3701 goto out;
34198bf8
MT
3702
3703 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3704 &tss_segment_16, sizeof tss_segment_16))
3705 goto out;
3706
37817f29
IE
3707 if (load_state_from_tss16(vcpu, &tss_segment_16))
3708 goto out;
3709
3710 ret = 1;
3711out:
3712 return ret;
3713}
3714
8b2cf73c 3715static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
34198bf8 3716 u32 old_tss_base,
37817f29
IE
3717 struct desc_struct *nseg_desc)
3718{
3719 struct tss_segment_32 tss_segment_32;
3720 int ret = 0;
3721
34198bf8
MT
3722 if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3723 sizeof tss_segment_32))
37817f29
IE
3724 goto out;
3725
3726 save_state_to_tss32(vcpu, &tss_segment_32);
37817f29 3727
34198bf8
MT
3728 if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
3729 sizeof tss_segment_32))
3730 goto out;
3731
3732 if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
3733 &tss_segment_32, sizeof tss_segment_32))
37817f29 3734 goto out;
34198bf8 3735
37817f29
IE
3736 if (load_state_from_tss32(vcpu, &tss_segment_32))
3737 goto out;
3738
3739 ret = 1;
3740out:
3741 return ret;
3742}
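/*
 * Both helpers above follow the same five-step sequence, sketched here
 * with error handling elided:
 *
 *	kvm_read_guest(old_tss);	- fetch the outgoing TSS
 *	save_state_to_tss*(vcpu);	- snapshot current vcpu state
 *	kvm_write_guest(old_tss);	- persist it for the old task
 *	kvm_read_guest(new_tss);	- fetch the incoming TSS
 *	load_state_from_tss*(vcpu);	- install the new task's state
 */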
3743
3744int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
3745{
3746 struct kvm_segment tr_seg;
3747 struct desc_struct cseg_desc;
3748 struct desc_struct nseg_desc;
3749 int ret = 0;
34198bf8
MT
3750 u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
3751 u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
37817f29 3752
34198bf8 3753 old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
37817f29 3754
34198bf8
MT
3755 /* FIXME: Handle errors. A failure to read either TSS or either
3756 * task's descriptor should generate a page fault.
3757 */
37817f29
IE
3758 if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
3759 goto out;
3760
34198bf8 3761 if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
37817f29
IE
3762 goto out;
3763
37817f29
IE
3764 if (reason != TASK_SWITCH_IRET) {
3765 int cpl;
3766
3767 cpl = kvm_x86_ops->get_cpl(vcpu);
3768 if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
3769 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
3770 return 1;
3771 }
3772 }
3773
3774 if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
3775 kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
3776 return 1;
3777 }
3778
3779 if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
3fe913e7 3780 cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
34198bf8 3781 save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
37817f29
IE
3782 }
3783
3784 if (reason == TASK_SWITCH_IRET) {
3785 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3786 kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
3787 }
3788
3789 kvm_x86_ops->skip_emulated_instruction(vcpu);
37817f29
IE
3790
3791 if (nseg_desc.type & 8)
34198bf8 3792 ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
37817f29
IE
3793 &nseg_desc);
3794 else
34198bf8 3795 ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
37817f29
IE
3796 &nseg_desc);
3797
3798 if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
3799 u32 eflags = kvm_x86_ops->get_rflags(vcpu);
3800 kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
3801 }
3802
3803 if (reason != TASK_SWITCH_IRET) {
3fe913e7 3804 nseg_desc.type |= (1 << 1); /* set the busy (B) flag */
37817f29
IE
3805 save_guest_segment_descriptor(vcpu, tss_selector,
3806 &nseg_desc);
3807 }
3808
3809 kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
3810 seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
3811 tr_seg.type = 11;
3e6e0aab 3812 kvm_set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
37817f29 3813out:
37817f29
IE
3814 return ret;
3815}
3816EXPORT_SYMBOL_GPL(kvm_task_switch);
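/*
 * A minimal sketch of how an exit handler is expected to reach this
 * entry point (decode illustrative; the real callers are the
 * task-switch intercepts in vmx.c/svm.c):
 *
 *	tss_selector = exit_qualification & 0xffff;
 *	reason = (exit_qualification >> 30) & 3;
 *	return kvm_task_switch(vcpu, tss_selector, reason);
 */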
3817
b6c7a5dc
HB
3818int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3819 struct kvm_sregs *sregs)
3820{
3821 int mmu_reset_needed = 0;
3822 int i, pending_vec, max_bits;
3823 struct descriptor_table dt;
3824
3825 vcpu_load(vcpu);
3826
3827 dt.limit = sregs->idt.limit;
3828 dt.base = sregs->idt.base;
3829 kvm_x86_ops->set_idt(vcpu, &dt);
3830 dt.limit = sregs->gdt.limit;
3831 dt.base = sregs->gdt.base;
3832 kvm_x86_ops->set_gdt(vcpu, &dt);
3833
ad312c7c
ZX
3834 vcpu->arch.cr2 = sregs->cr2;
3835 mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
3836 vcpu->arch.cr3 = sregs->cr3;
b6c7a5dc 3837
2d3ad1f4 3838 kvm_set_cr8(vcpu, sregs->cr8);
b6c7a5dc 3839
ad312c7c 3840 mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
b6c7a5dc 3841 kvm_x86_ops->set_efer(vcpu, sregs->efer);
b6c7a5dc
HB
3842 kvm_set_apic_base(vcpu, sregs->apic_base);
3843
3844 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
3845
ad312c7c 3846 mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
b6c7a5dc 3847 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
d7306163 3848 vcpu->arch.cr0 = sregs->cr0;
b6c7a5dc 3849
ad312c7c 3850 mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
b6c7a5dc
HB
3851 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
3852 if (!is_long_mode(vcpu) && is_pae(vcpu))
ad312c7c 3853 load_pdptrs(vcpu, vcpu->arch.cr3);
b6c7a5dc
HB
3854
3855 if (mmu_reset_needed)
3856 kvm_mmu_reset_context(vcpu);
3857
3858 if (!irqchip_in_kernel(vcpu->kvm)) {
ad312c7c
ZX
3859 memcpy(vcpu->arch.irq_pending, sregs->interrupt_bitmap,
3860 sizeof vcpu->arch.irq_pending);
3861 vcpu->arch.irq_summary = 0;
3862 for (i = 0; i < ARRAY_SIZE(vcpu->arch.irq_pending); ++i)
3863 if (vcpu->arch.irq_pending[i])
3864 __set_bit(i, &vcpu->arch.irq_summary);
b6c7a5dc
HB
3865 } else {
3866 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
3867 pending_vec = find_first_bit(
3868 (const unsigned long *)sregs->interrupt_bitmap,
3869 max_bits);
3870 /* Only a pending external irq is handled here */
3871 if (pending_vec < max_bits) {
3872 kvm_x86_ops->set_irq(vcpu, pending_vec);
3873 pr_debug("Set back pending irq %d\n",
3874 pending_vec);
3875 }
e4825800 3876 kvm_pic_clear_isr_ack(vcpu->kvm);
b6c7a5dc
HB
3877 }
3878
3e6e0aab
GT
3879 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
3880 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
3881 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
3882 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
3883 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
3884 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
b6c7a5dc 3885
3e6e0aab
GT
3886 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
3887 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
b6c7a5dc 3888
9c3e4aab
MT
3889 /* Older userspace won't unhalt the vcpu on reset. */
3890 if (vcpu->vcpu_id == 0 && kvm_rip_read(vcpu) == 0xfff0 &&
3891 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
3892 !(vcpu->arch.cr0 & X86_CR0_PE))
3893 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3894
b6c7a5dc
HB
3895 vcpu_put(vcpu);
3896
3897 return 0;
3898}
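/*
 * Userspace reaches this handler through the KVM_SET_SREGS vcpu ioctl;
 * a minimal read-modify-write sketch (error checking elided, constant
 * illustrative):
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.cr0 |= 0x1;			- set CR0.PE
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */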
3899
d0bfb940
JK
3900int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3901 struct kvm_guest_debug *dbg)
b6c7a5dc 3902{
ae675ef0 3903 int i, r;
b6c7a5dc
HB
3904
3905 vcpu_load(vcpu);
3906
ae675ef0
JK
3907 if ((dbg->control & (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) ==
3908 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP)) {
3909 for (i = 0; i < KVM_NR_DB_REGS; ++i)
3910 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
3911 vcpu->arch.switch_db_regs =
3912 (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
3913 } else {
3914 for (i = 0; i < KVM_NR_DB_REGS; i++)
3915 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
3916 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
3917 }
3918
b6c7a5dc
HB
3919 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
3920
d0bfb940
JK
3921 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
3922 kvm_queue_exception(vcpu, DB_VECTOR);
3923 else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
3924 kvm_queue_exception(vcpu, BP_VECTOR);
3925
b6c7a5dc
HB
3926 vcpu_put(vcpu);
3927
3928 return r;
3929}
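/*
 * A hedged userspace sketch for this path: program a hardware execute
 * breakpoint via the KVM_SET_GUEST_DEBUG vcpu ioctl (bp_addr is
 * illustrative):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *	};
 *
 *	dbg.arch.debugreg[0] = bp_addr;
 *	dbg.arch.debugreg[7] = 0x1;		- DR7.L0: locally enable DR0
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */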
3930
d0752060
HB
3931/*
3932 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
3933 * we have asm/x86/processor.h
3934 */
3935struct fxsave {
3936 u16 cwd;
3937 u16 swd;
3938 u16 twd;
3939 u16 fop;
3940 u64 rip;
3941 u64 rdp;
3942 u32 mxcsr;
3943 u32 mxcsr_mask;
3944 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
3945#ifdef CONFIG_X86_64
3946 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
3947#else
3948 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
3949#endif
3950};
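/*
 * This mirrors the in-memory image written by the fxsave instruction.
 * Note that fxsave/fxrstor require a 16-byte-aligned buffer, which is
 * what the BUG_ON() alignment check in kvm_arch_vcpu_setup() below
 * enforces.
 */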
3951
8b006791
ZX
3952/*
3953 * Translate a guest virtual address to a guest physical address.
3954 */
3955int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3956 struct kvm_translation *tr)
3957{
3958 unsigned long vaddr = tr->linear_address;
3959 gpa_t gpa;
3960
3961 vcpu_load(vcpu);
72dc67a6 3962 down_read(&vcpu->kvm->slots_lock);
ad312c7c 3963 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
72dc67a6 3964 up_read(&vcpu->kvm->slots_lock);
8b006791
ZX
3965 tr->physical_address = gpa;
3966 tr->valid = gpa != UNMAPPED_GVA;
3967 tr->writeable = 1;
3968 tr->usermode = 0;
8b006791
ZX
3969 vcpu_put(vcpu);
3970
3971 return 0;
3972}
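/*
 * Userspace reaches this via the KVM_TRANSLATE vcpu ioctl; a minimal
 * sketch (guest_va illustrative):
 *
 *	struct kvm_translation tr = { .linear_address = guest_va };
 *
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *		printf("gpa = 0x%llx\n", tr.physical_address);
 */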
3973
d0752060
HB
3974int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3975{
ad312c7c 3976 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3977
3978 vcpu_load(vcpu);
3979
3980 memcpy(fpu->fpr, fxsave->st_space, 128);
3981 fpu->fcw = fxsave->cwd;
3982 fpu->fsw = fxsave->swd;
3983 fpu->ftwx = fxsave->twd;
3984 fpu->last_opcode = fxsave->fop;
3985 fpu->last_ip = fxsave->rip;
3986 fpu->last_dp = fxsave->rdp;
3987 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
3988
3989 vcpu_put(vcpu);
3990
3991 return 0;
3992}
3993
3994int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3995{
ad312c7c 3996 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
d0752060
HB
3997
3998 vcpu_load(vcpu);
3999
4000 memcpy(fxsave->st_space, fpu->fpr, 128);
4001 fxsave->cwd = fpu->fcw;
4002 fxsave->swd = fpu->fsw;
4003 fxsave->twd = fpu->ftwx;
4004 fxsave->fop = fpu->last_opcode;
4005 fxsave->rip = fpu->last_ip;
4006 fxsave->rdp = fpu->last_dp;
4007 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
4008
4009 vcpu_put(vcpu);
4010
4011 return 0;
4012}
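/*
 * The two handlers above back the KVM_GET_FPU/KVM_SET_FPU vcpu ioctls;
 * a read-modify-write sketch from userspace (error checking elided):
 *
 *	struct kvm_fpu fpu;
 *
 *	ioctl(vcpu_fd, KVM_GET_FPU, &fpu);
 *	fpu.fcw = 0x037f;			- x87 default control word
 *	ioctl(vcpu_fd, KVM_SET_FPU, &fpu);
 */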
4013
4014void fx_init(struct kvm_vcpu *vcpu)
4015{
4016 unsigned after_mxcsr_mask;
4017
bc1a34f1
AA
4018 /*
4019 * Touch the fpu the first time in non-atomic context: if this is
4020 * the first fpu instruction, the exception handler will fire before
4021 * the instruction returns, and it will have to allocate RAM with
4022 * GFP_KERNEL.
4023 */
4024 if (!used_math())
d6e88aec 4025 kvm_fx_save(&vcpu->arch.host_fx_image);
bc1a34f1 4026
d0752060
HB
4027 /* Initialize guest FPU by resetting ours and saving into guest's */
4028 preempt_disable();
d6e88aec
AK
4029 kvm_fx_save(&vcpu->arch.host_fx_image);
4030 kvm_fx_finit();
4031 kvm_fx_save(&vcpu->arch.guest_fx_image);
4032 kvm_fx_restore(&vcpu->arch.host_fx_image);
d0752060
HB
4033 preempt_enable();
4034
ad312c7c 4035 vcpu->arch.cr0 |= X86_CR0_ET;
d0752060 4036 after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
ad312c7c
ZX
4037 vcpu->arch.guest_fx_image.mxcsr = 0x1f80;
4038 memset((void *)&vcpu->arch.guest_fx_image + after_mxcsr_mask,
d0752060
HB
4039 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
4040}
4041EXPORT_SYMBOL_GPL(fx_init);
4042
4043void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
4044{
4045 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
4046 return;
4047
4048 vcpu->guest_fpu_loaded = 1;
d6e88aec
AK
4049 kvm_fx_save(&vcpu->arch.host_fx_image);
4050 kvm_fx_restore(&vcpu->arch.guest_fx_image);
d0752060
HB
4051}
4052EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
4053
4054void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
4055{
4056 if (!vcpu->guest_fpu_loaded)
4057 return;
4058
4059 vcpu->guest_fpu_loaded = 0;
d6e88aec
AK
4060 kvm_fx_save(&vcpu->arch.guest_fx_image);
4061 kvm_fx_restore(&vcpu->arch.host_fx_image);
f096ed85 4062 ++vcpu->stat.fpu_reload;
d0752060
HB
4063}
4064EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
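/*
 * The load/put pair above implements lazy FPU switching; a simplified
 * sketch of the expected pairing in the vcpu run path:
 *
 *	kvm_load_guest_fpu(vcpu);	- guest image live before entry
 *	... enter guest; it may touch the FPU ...
 *	kvm_put_guest_fpu(vcpu);	- host image restored on the way out
 */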
e9b11c17
ZX
4065
4066void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
4067{
4068 kvm_x86_ops->vcpu_free(vcpu);
4069}
4070
4071struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
4072 unsigned int id)
4073{
26e5215f
AK
4074 return kvm_x86_ops->vcpu_create(kvm, id);
4075}
e9b11c17 4076
26e5215f
AK
4077int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
4078{
4079 int r;
e9b11c17
ZX
4080
4081 /* We do fxsave: this must be aligned. */
ad312c7c 4082 BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
e9b11c17 4083
0bed3b56 4084 vcpu->arch.mtrr_state.have_fixed = 1;
e9b11c17
ZX
4085 vcpu_load(vcpu);
4086 r = kvm_arch_vcpu_reset(vcpu);
4087 if (r == 0)
4088 r = kvm_mmu_setup(vcpu);
4089 vcpu_put(vcpu);
4090 if (r < 0)
4091 goto free_vcpu;
4092
26e5215f 4093 return 0;
e9b11c17
ZX
4094free_vcpu:
4095 kvm_x86_ops->vcpu_free(vcpu);
26e5215f 4096 return r;
e9b11c17
ZX
4097}
4098
d40ccc62 4099void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
e9b11c17
ZX
4100{
4101 vcpu_load(vcpu);
4102 kvm_mmu_unload(vcpu);
4103 vcpu_put(vcpu);
4104
4105 kvm_x86_ops->vcpu_free(vcpu);
4106}
4107
4108int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
4109{
448fa4a9
JK
4110 vcpu->arch.nmi_pending = false;
4111 vcpu->arch.nmi_injected = false;
4112
42dbaa5a
JK
4113 vcpu->arch.switch_db_regs = 0;
4114 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
4115 vcpu->arch.dr6 = DR6_FIXED_1;
4116 vcpu->arch.dr7 = DR7_FIXED_1;
4117
e9b11c17
ZX
4118 return kvm_x86_ops->vcpu_reset(vcpu);
4119}
4120
4121void kvm_arch_hardware_enable(void *garbage)
4122{
4123 kvm_x86_ops->hardware_enable(garbage);
4124}
4125
4126void kvm_arch_hardware_disable(void *garbage)
4127{
4128 kvm_x86_ops->hardware_disable(garbage);
4129}
4130
4131int kvm_arch_hardware_setup(void)
4132{
4133 return kvm_x86_ops->hardware_setup();
4134}
4135
4136void kvm_arch_hardware_unsetup(void)
4137{
4138 kvm_x86_ops->hardware_unsetup();
4139}
4140
4141void kvm_arch_check_processor_compat(void *rtn)
4142{
4143 kvm_x86_ops->check_processor_compatibility(rtn);
4144}
4145
4146int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
4147{
4148 struct page *page;
4149 struct kvm *kvm;
4150 int r;
4151
4152 BUG_ON(vcpu->kvm == NULL);
4153 kvm = vcpu->kvm;
4154
ad312c7c 4155 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
e9b11c17 4156 if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
a4535290 4157 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
e9b11c17 4158 else
a4535290 4159 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
e9b11c17
ZX
4160
4161 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
4162 if (!page) {
4163 r = -ENOMEM;
4164 goto fail;
4165 }
ad312c7c 4166 vcpu->arch.pio_data = page_address(page);
e9b11c17
ZX
4167
4168 r = kvm_mmu_create(vcpu);
4169 if (r < 0)
4170 goto fail_free_pio_data;
4171
4172 if (irqchip_in_kernel(kvm)) {
4173 r = kvm_create_lapic(vcpu);
4174 if (r < 0)
4175 goto fail_mmu_destroy;
4176 }
4177
4178 return 0;
4179
4180fail_mmu_destroy:
4181 kvm_mmu_destroy(vcpu);
4182fail_free_pio_data:
ad312c7c 4183 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17
ZX
4184fail:
4185 return r;
4186}
4187
4188void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
4189{
4190 kvm_free_lapic(vcpu);
3200f405 4191 down_read(&vcpu->kvm->slots_lock);
e9b11c17 4192 kvm_mmu_destroy(vcpu);
3200f405 4193 up_read(&vcpu->kvm->slots_lock);
ad312c7c 4194 free_page((unsigned long)vcpu->arch.pio_data);
e9b11c17 4195}
d19a9cd2
ZX
4196
4197struct kvm *kvm_arch_create_vm(void)
4198{
4199 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
4200
4201 if (!kvm)
4202 return ERR_PTR(-ENOMEM);
4203
f05e70ac 4204 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6cffe8ca 4205 INIT_LIST_HEAD(&kvm->arch.oos_global_pages);
4d5c5d0f 4206 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
d19a9cd2 4207
5550af4d
SY
4208 /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
4209 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
4210
53f658b3
MT
4211 rdtscll(kvm->arch.vm_init_tsc);
4212
d19a9cd2
ZX
4213 return kvm;
4214}
4215
4216static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
4217{
4218 vcpu_load(vcpu);
4219 kvm_mmu_unload(vcpu);
4220 vcpu_put(vcpu);
4221}
4222
4223static void kvm_free_vcpus(struct kvm *kvm)
4224{
4225 unsigned int i;
4226
4227 /*
4228 * Unpin any mmu pages first.
4229 */
4230 for (i = 0; i < KVM_MAX_VCPUS; ++i)
4231 if (kvm->vcpus[i])
4232 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
4233 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
4234 if (kvm->vcpus[i]) {
4235 kvm_arch_vcpu_free(kvm->vcpus[i]);
4236 kvm->vcpus[i] = NULL;
4237 }
4238 }
4239
4240}
4241
ad8ba2cd
SY
4242void kvm_arch_sync_events(struct kvm *kvm)
4243{
ba4cef31 4244 kvm_free_all_assigned_devices(kvm);
ad8ba2cd
SY
4245}
4246
d19a9cd2
ZX
4247void kvm_arch_destroy_vm(struct kvm *kvm)
4248{
6eb55818 4249 kvm_iommu_unmap_guest(kvm);
7837699f 4250 kvm_free_pit(kvm);
d7deeeb0
ZX
4251 kfree(kvm->arch.vpic);
4252 kfree(kvm->arch.vioapic);
d19a9cd2
ZX
4253 kvm_free_vcpus(kvm);
4254 kvm_free_physmem(kvm);
3d45830c
AK
4255 if (kvm->arch.apic_access_page)
4256 put_page(kvm->arch.apic_access_page);
b7ebfb05
SY
4257 if (kvm->arch.ept_identity_pagetable)
4258 put_page(kvm->arch.ept_identity_pagetable);
d19a9cd2
ZX
4259 kfree(kvm);
4260}
0de10343
ZX
4261
4262int kvm_arch_set_memory_region(struct kvm *kvm,
4263 struct kvm_userspace_memory_region *mem,
4264 struct kvm_memory_slot old,
4265 int user_alloc)
4266{
4267 int npages = mem->memory_size >> PAGE_SHIFT;
4268 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
4269
4270 /* To keep backward compatibility with older userspace,
4271 * x86 needs to handle the !user_alloc case.
4272 */
4273 if (!user_alloc) {
4274 if (npages && !old.rmap) {
604b38ac
AA
4275 unsigned long userspace_addr;
4276
72dc67a6 4277 down_write(&current->mm->mmap_sem);
604b38ac
AA
4278 userspace_addr = do_mmap(NULL, 0,
4279 npages * PAGE_SIZE,
4280 PROT_READ | PROT_WRITE,
acee3c04 4281 MAP_PRIVATE | MAP_ANONYMOUS,
604b38ac 4282 0);
72dc67a6 4283 up_write(&current->mm->mmap_sem);
0de10343 4284
604b38ac
AA
4285 if (IS_ERR((void *)userspace_addr))
4286 return PTR_ERR((void *)userspace_addr);
4287
4288 /* set userspace_addr atomically for kvm_hva_to_rmapp */
4289 spin_lock(&kvm->mmu_lock);
4290 memslot->userspace_addr = userspace_addr;
4291 spin_unlock(&kvm->mmu_lock);
0de10343
ZX
4292 } else {
4293 if (!old.user_alloc && old.rmap) {
4294 int ret;
4295
72dc67a6 4296 down_write(&current->mm->mmap_sem);
0de10343
ZX
4297 ret = do_munmap(current->mm, old.userspace_addr,
4298 old.npages * PAGE_SIZE);
72dc67a6 4299 up_write(&current->mm->mmap_sem);
0de10343
ZX
4300 if (ret < 0)
4301 printk(KERN_WARNING
4302 "kvm_vm_ioctl_set_memory_region: "
4303 "failed to munmap memory\n");
4304 }
4305 }
4306 }
4307
f05e70ac 4308 if (!kvm->arch.n_requested_mmu_pages) {
0de10343
ZX
4309 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
4310 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
4311 }
4312
4313 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
4314 kvm_flush_remote_tlbs(kvm);
4315
4316 return 0;
4317}
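/*
 * Userspace drives this through the KVM_SET_USER_MEMORY_REGION vm ioctl;
 * a minimal sketch for the user_alloc case (host_mem from mmap() and
 * mem_size are illustrative):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = mem_size,
 *		.userspace_addr = (unsigned long)host_mem,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */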
1d737c8a 4318
34d4cb8f
MT
4319void kvm_arch_flush_shadow(struct kvm *kvm)
4320{
4321 kvm_mmu_zap_all(kvm);
4322}
4323
1d737c8a
ZX
4324int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
4325{
a4535290 4326 return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
0496fbb9
JK
4327 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
4328 || vcpu->arch.nmi_pending;
1d737c8a 4329}
5736199a
ZX
4330
4331static void vcpu_kick_intr(void *info)
4332{
4333#ifdef DEBUG
4334 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
4335 printk(KERN_DEBUG "vcpu_kick_intr %p\n", vcpu);
4336#endif
4337}
4338
4339void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
4340{
4341 int ipi_pcpu = vcpu->cpu;
e9571ed5 4342 int cpu = get_cpu();
5736199a
ZX
4343
4344 if (waitqueue_active(&vcpu->wq)) {
4345 wake_up_interruptible(&vcpu->wq);
4346 ++vcpu->stat.halt_wakeup;
4347 }
e9571ed5
MT
4348 /*
4349 * We may be called synchronously with irqs disabled in guest mode,
4350 * so there is no need to call smp_call_function_single() in that case.
4351 */
4352 if (vcpu->guest_mode && vcpu->cpu != cpu)
8691e5a8 4353 smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
e9571ed5 4354 put_cpu();
5736199a 4355}