KVM: remove duplicated task_switch check
[linux-block.git] / arch / x86 / kvm / vmx.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/ftrace_event.h>
#include "kvm_cache_regs.h"
#include "x86.h"

#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>

#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int __read_mostly bypass_guest_pf = 1;
module_param(bypass_guest_pf, bool, S_IRUGO);

static int __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static int __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

static int __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

static int __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to test, this time is usually smaller than
 *             41 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held
 *             for less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer SDM volume 3b section 21.6.13 & 22.1.3.
 */
#define KVM_VMX_DEFAULT_PLE_GAP		41
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);

static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);

struct vmcs {
	u32 revision_id;
	u32 abort;
	char data[0];
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
};
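/*
 * A note on the two structures above: struct vmcs maps only the
 * architecturally defined header of the hardware VMCS region (revision id
 * and the VMX-abort indicator); the rest of the region is opaque and is
 * accessed exclusively through VMREAD/VMWRITE.  struct shared_msr_entry
 * pairs an index into vmx_msr_index[] with the guest's value for that MSR,
 * which is what gets handed to kvm_set_shared_msr() before entry.
 */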

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	struct list_head      local_vcpus_link;
	unsigned long         host_rsp;
	int                   launched;
	u8                    fail;
	u32                   idt_vectoring_info;
	struct shared_msr_entry *guest_msrs;
	int                   nmsrs;
	int                   save_nmsrs;
	int                   msr_offset_efer;
#ifdef CONFIG_X86_64
	u64                   msr_host_kernel_gs_base;
	u64                   msr_guest_kernel_gs_base;
#endif
	struct vmcs          *vmcs;
	struct {
		int           loaded;
		u16           fs_sel, gs_sel, ldt_sel;
		int           gs_ldt_reload_needed;
		int           fs_reload_needed;
	} host_state;
	struct {
		int vm86_active;
		u8 save_iopl;
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} tr, es, ds, fs, gs;
		struct {
			bool pending;
			u8 vector;
			unsigned rip;
		} irq;
	} rmode;
	int vpid;
	bool emulation_required;

	/* Support for vnmi-less CPUs */
	int soft_vnmi_blocked;
	ktime_t entry_time;
	s64 vnmi_blocked_time;
	u32 exit_reason;
};

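/*
 * Since the generic struct kvm_vcpu is embedded inside struct vcpu_vmx,
 * container_of() can recover the VMX-specific wrapper from the vcpu
 * pointer that common KVM code passes around.
 */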
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static int init_rmode(struct kvm *kvm);
static u64 construct_eptp(unsigned long root_hpa);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);

static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

static struct vmcs_config {
	int size;
	int order;
	u32 revision_id;
	u32 pin_based_exec_ctrl;
	u32 cpu_based_exec_ctrl;
	u32 cpu_based_2nd_exec_ctrl;
	u32 vmexit_ctrl;
	u32 vmentry_ctrl;
} vmcs_config;

static struct vmx_capability {
	u32 ept;
	u32 vpid;
} vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {					\
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,			\
		.limit = GUEST_##seg##_LIMIT,			\
		.ar_bytes = GUEST_##seg##_AR_BYTES,		\
	}

static struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static u64 host_efer;

static void ept_save_pdptrs(struct kvm_vcpu *vcpu);

/*
 * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
 * away by decrementing the array size.
 */
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)

static inline int is_page_fault(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_no_device(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_invalid_opcode(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}

static inline int is_external_interrupt(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
		== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}

static inline int is_machine_check(u32 intr_info)
{
	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
			     INTR_INFO_VALID_MASK)) ==
		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
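/*
 * The predicates above all exploit the layout of the VM-exit
 * interruption-information field: vector number in bits 7:0, event type
 * in bits 10:8 and the valid bit at bit 31.  Masking those three groups
 * and comparing against the expected type/vector/valid combination
 * identifies an event with a single compare.
 */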

static inline int cpu_has_vmx_msr_bitmap(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}

static inline int cpu_has_vmx_tpr_shadow(void)
{
	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}

static inline int vm_need_tpr_shadow(struct kvm *kvm)
{
	return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
}

static inline int cpu_has_secondary_exec_ctrls(void)
{
	return vmcs_config.cpu_based_exec_ctrl &
		CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}

static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}

static inline bool cpu_has_vmx_flexpriority(void)
{
	return cpu_has_vmx_tpr_shadow() &&
		cpu_has_vmx_virtualize_apic_accesses();
}

static inline bool cpu_has_vmx_ept_execute_only(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
}

static inline bool cpu_has_vmx_eptp_uncacheable(void)
{
	return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
}

static inline bool cpu_has_vmx_eptp_writeback(void)
{
	return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
}

static inline bool cpu_has_vmx_ept_2m_page(void)
{
	return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}

static inline int cpu_has_vmx_invept_individual_addr(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
}

static inline int cpu_has_vmx_invept_context(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
}

static inline int cpu_has_vmx_invept_global(void)
{
	return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
}

static inline int cpu_has_vmx_ept(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_EPT;
}

static inline int cpu_has_vmx_unrestricted_guest(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_UNRESTRICTED_GUEST;
}

static inline int cpu_has_vmx_ple(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}

static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
{
	return flexpriority_enabled &&
		(cpu_has_vmx_virtualize_apic_accesses()) &&
		(irqchip_in_kernel(kvm));
}

static inline int cpu_has_vmx_vpid(void)
{
	return vmcs_config.cpu_based_2nd_exec_ctrl &
		SECONDARY_EXEC_ENABLE_VPID;
}

static inline int cpu_has_virtual_nmis(void)
{
	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	asm volatile (__ex(ASM_VMX_INVVPID)
		  /* CF==1 or ZF==1 --> rc = -1 */
		  "; ja 1f ; ud2 ; 1:"
		  : : "a"(&operand), "c"(ext) : "cc", "memory");
}

static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	asm volatile (__ex(ASM_VMX_INVEPT)
			/* CF==1 or ZF==1 --> rc = -1 */
			"; ja 1f ; ud2 ; 1:\n"
			: : "a" (&operand), "c" (ext) : "cc", "memory");
}
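/*
 * Both wrappers above follow the architectural calling convention for
 * INVVPID/INVEPT: the invalidation type is passed in a register (%ecx)
 * and a 128-bit descriptor in memory is referenced through %rax.  Failure
 * is reported via CF/ZF, so the "ja" (taken only when CF==0 and ZF==0)
 * skips the ud2 trap only on success.
 */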

static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

static void __vcpu_clear(void *arg)
{
	struct vcpu_vmx *vmx = arg;
	int cpu = raw_smp_processor_id();

	if (vmx->vcpu.cpu == cpu)
		vmcs_clear(vmx->vmcs);
	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;
	rdtscll(vmx->vcpu.arch.host_tsc);
	list_del(&vmx->local_vcpus_link);
	vmx->vcpu.cpu = -1;
	vmx->launched = 0;
}

static void vcpu_clear(struct vcpu_vmx *vmx)
{
	if (vmx->vcpu.cpu == -1)
		return;
	smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
}
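/*
 * vcpu_clear() must run __vcpu_clear() on the CPU that last ran the vcpu:
 * VMCLEAR flushes VMCS state cached by the current processor, so the work
 * is shipped to the right CPU via an IPI.
 */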

static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
{
	if (vmx->vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
}

static inline void ept_sync_global(void)
{
	if (cpu_has_vmx_invept_global())
		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_context())
			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
		else
			ept_sync_global();
	}
}

static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
{
	if (enable_ept) {
		if (cpu_has_vmx_invept_individual_addr())
			__invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
					eptp, gpa);
		else
			ept_sync_context(eptp);
	}
}
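/*
 * The sync helpers above fall back to progressively coarser invalidation
 * scopes (individual address -> context -> global) when the CPU lacks the
 * finer-grained INVEPT type; over-invalidating is always correct, merely
 * slower.
 */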

static unsigned long vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
		      : "=a"(value) : "d"(field) : "cc");
	return value;
}

static u16 vmcs_read16(unsigned long field)
{
	return vmcs_readl(field);
}

static u32 vmcs_read32(unsigned long field)
{
	return vmcs_readl(field);
}

static u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
	return vmcs_readl(field);
#else
	return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}

static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
	       field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
	dump_stack();
}

static void vmcs_writel(unsigned long field, unsigned long value)
{
	u8 error;

	asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
		       : "=q"(error) : "a"(value), "d"(field) : "cc");
	if (unlikely(error))
		vmwrite_error(field, value);
}

static void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_writel(field, value);
}

static void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	asm volatile ("");
	vmcs_writel(field+1, value >> 32);
#endif
}
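/*
 * On 32-bit hosts a 64-bit VMCS field is accessed as two adjacent 32-bit
 * fields, the high half living at field+1; the empty asm statement above
 * is presumably there to keep the compiler from fusing the two halves of
 * the write.
 */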

static void vmcs_clear_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) & ~mask);
}

static void vmcs_set_bits(unsigned long field, u32 mask)
{
	vmcs_writel(field, vmcs_readl(field) | mask);
}

static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
	if (!vcpu->fpu_active)
		eb |= 1u << NM_VECTOR;
	/*
	 * Unconditionally intercept #DB so we can maintain dr6 without
	 * reading it every exit.
	 */
	eb |= 1u << DB_VECTOR;
	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			eb |= 1u << BP_VECTOR;
	}
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
	vmcs_write32(EXCEPTION_BITMAP, eb);
}

static void reload_tss(void)
{
	/*
	 * VT restores TR but not its size.  Useless.
	 */
	struct descriptor_table gdt;
	struct desc_struct *descs;

	kvm_get_gdt(&gdt);
	descs = (void *)gdt.base;
	descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
	load_TR_desc();
}
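/*
 * Forcing the descriptor type back to 9 ("available TSS") matters because
 * the in-use TSS descriptor is marked busy, and LTR faults when asked to
 * load a busy TSS; load_TR_desc() then re-executes LTR with the full
 * limit restored.
 */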

static bool update_transition_efer(struct vcpu_vmx *vmx)
{
	int efer_offset = vmx->msr_offset_efer;
	u64 guest_efer;
	u64 ignore_bits;

	if (efer_offset < 0)
		return false;
	guest_efer = vmx->vcpu.arch.shadow_efer;

	/*
	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
	 * outside long mode
	 */
	ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif
	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
		return false;

	guest_efer &= ~ignore_bits;
	guest_efer |= host_efer & ignore_bits;
	vmx->guest_msrs[efer_offset].data = guest_efer;
	return true;
}
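/*
 * Net effect: if guest and host EFER agree on every bit that actually
 * needs switching, the MSR save/restore on entry/exit is skipped entirely
 * (return false).  Otherwise the stored guest image carries the host's
 * values in the ignored bits, since those are handled by hardware or by
 * emulation rather than by the switched MSR.
 */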

static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int i;

	if (vmx->host_state.loaded)
		return;

	vmx->host_state.loaded = 1;
	/*
	 * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
	 * allow segment selectors with cpl > 0 or ti == 1.
	 */
	vmx->host_state.ldt_sel = kvm_read_ldt();
	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
	vmx->host_state.fs_sel = kvm_read_fs();
	if (!(vmx->host_state.fs_sel & 7)) {
		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
		vmx->host_state.fs_reload_needed = 0;
	} else {
		vmcs_write16(HOST_FS_SELECTOR, 0);
		vmx->host_state.fs_reload_needed = 1;
	}
	vmx->host_state.gs_sel = kvm_read_gs();
	if (!(vmx->host_state.gs_sel & 7))
		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
	else {
		vmcs_write16(HOST_GS_SELECTOR, 0);
		vmx->host_state.gs_ldt_reload_needed = 1;
	}

#ifdef CONFIG_X86_64
	vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
	vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
	vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
	vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	}
#endif
	for (i = 0; i < vmx->save_nmsrs; ++i)
		kvm_set_shared_msr(vmx->guest_msrs[i].index,
				   vmx->guest_msrs[i].data);
}
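/*
 * The "& 7" tests above isolate a selector's RPL (bits 1:0) and TI
 * (bit 2) fields.  If either is nonzero the selector may not be placed in
 * the VMCS host-state area (see the 22.2.3 comment), so 0 is written
 * instead and the real selector is reloaded manually after the VM exit in
 * __vmx_load_host_state().
 */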

static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
	unsigned long flags;

	if (!vmx->host_state.loaded)
		return;

	++vmx->vcpu.stat.host_state_reload;
	vmx->host_state.loaded = 0;
	if (vmx->host_state.fs_reload_needed)
		kvm_load_fs(vmx->host_state.fs_sel);
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
		/*
		 * If we have to reload gs, we must take care to
		 * preserve our gs base.
		 */
		local_irq_save(flags);
		kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
#endif
		local_irq_restore(flags);
	}
	reload_tss();
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	}
#endif
}

static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
	preempt_disable();
	__vmx_load_host_state(vmx);
	preempt_enable();
}

/*
 * Switches to specified vcpu, until a matching vcpu_put(), but assumes
 * vcpu mutex is already taken.
 */
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta, new_offset;

	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
		kvm_migrate_timers(vcpu);
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
		local_irq_disable();
		list_add(&vmx->local_vcpus_link,
			 &per_cpu(vcpus_on_cpu, cpu));
		local_irq_enable();
	}

	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;

		per_cpu(current_vmcs, cpu) = vmx->vmcs;
		asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
			      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			      : "cc");
		if (error)
			printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
			       vmx->vmcs, phys_addr);
	}

	if (vcpu->cpu != cpu) {
		struct descriptor_table dt;
		unsigned long sysenter_esp;

		vcpu->cpu = cpu;
		/*
		 * Linux uses per-cpu TSS and GDT, so set these when switching
		 * processors.
		 */
		vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
		kvm_get_gdt(&dt);
		vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */

		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */

		/*
		 * Make sure the time stamp counter is monotonic.
		 */
		rdtscll(tsc_this);
		if (tsc_this < vcpu->arch.host_tsc) {
			delta = vcpu->arch.host_tsc - tsc_this;
			new_offset = vmcs_read64(TSC_OFFSET) + delta;
			vmcs_write64(TSC_OFFSET, new_offset);
		}
	}
}
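/*
 * The TSC fixup above keeps the guest-observed TSC monotonic across a
 * migration to a CPU whose TSC lags: since guest_tsc = host_tsc +
 * TSC_OFFSET, adding the host-side deficit to the offset hides the
 * backwards jump from the guest.
 */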

static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
	__vmx_load_host_state(to_vmx(vcpu));
}

static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
	if (vcpu->fpu_active)
		return;
	vcpu->fpu_active = 1;
	vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
	if (vcpu->arch.cr0 & X86_CR0_TS)
		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->fpu_active)
		return;
	vcpu->fpu_active = 0;
	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
	update_exception_bitmap(vcpu);
}

static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = vmcs_readl(GUEST_RFLAGS);
	if (to_vmx(vcpu)->rmode.vm86_active)
		rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	return rflags;
}

static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (to_vmx(vcpu)->rmode.vm86_active)
		rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
	vmcs_writel(GUEST_RFLAGS, rflags);
}

static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	int ret = 0;

	if (interruptibility & GUEST_INTR_STATE_STI)
		ret |= X86_SHADOW_INT_STI;
	if (interruptibility & GUEST_INTR_STATE_MOV_SS)
		ret |= X86_SHADOW_INT_MOV_SS;

	return ret & mask;
}

static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
	u32 interruptibility = interruptibility_old;

	interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);

	if (mask & X86_SHADOW_INT_MOV_SS)
		interruptibility |= GUEST_INTR_STATE_MOV_SS;
	if (mask & X86_SHADOW_INT_STI)
		interruptibility |= GUEST_INTR_STATE_STI;

	if ((interruptibility != interruptibility_old))
		vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
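/*
 * The two helpers above translate between the VMCS guest-interruptibility
 * bits and KVM's generic X86_SHADOW_INT_* flags; both describe the
 * one-instruction window after STI or MOV SS during which interrupt
 * delivery is blocked.
 */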

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	unsigned long rip;

	rip = kvm_rip_read(vcpu);
	rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
	kvm_rip_write(vcpu, rip);

	/* skipping an emulated instruction also counts */
	vmx_set_interrupt_shadow(vcpu, 0);
}

static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 intr_info = nr | INTR_INFO_VALID_MASK;

	if (has_error_code) {
		vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
		intr_info |= INTR_INFO_DELIVER_CODE_MASK;
	}

	if (vmx->rmode.vm86_active) {
		vmx->rmode.irq.pending = true;
		vmx->rmode.irq.vector = nr;
		vmx->rmode.irq.rip = kvm_rip_read(vcpu);
		if (kvm_exception_is_soft(nr))
			vmx->rmode.irq.rip +=
				vmx->vcpu.arch.event_exit_inst_len;
		intr_info |= INTR_TYPE_SOFT_INTR;
		vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
		kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
		return;
	}

	if (kvm_exception_is_soft(nr)) {
		vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
			     vmx->vcpu.arch.event_exit_inst_len);
		intr_info |= INTR_TYPE_SOFT_EXCEPTION;
	} else
		intr_info |= INTR_TYPE_HARD_EXCEPTION;

	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}

/*
 * Swap MSR entry in host/guest MSR entry array.
 */
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
	struct shared_msr_entry tmp;

	tmp = vmx->guest_msrs[to];
	vmx->guest_msrs[to] = vmx->guest_msrs[from];
	vmx->guest_msrs[from] = tmp;
}

/*
 * Set up the vmcs to automatically save and restore system
 * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
 * mode, as fiddling with msrs is very expensive.
 */
static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs, index;
	unsigned long *msr_bitmap;

	vmx_load_host_state(vmx);
	save_nmsrs = 0;
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu)) {
		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_CSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/*
		 * MSR_K6_STAR is only needed on long mode guests, and only
		 * if efer.sce is enabled.
		 */
		index = __find_msr_index(vmx, MSR_K6_STAR);
		if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
			move_msr_up(vmx, index, save_nmsrs++);
	}
#endif
	vmx->msr_offset_efer = index = __find_msr_index(vmx, MSR_EFER);
	if (index >= 0 && update_transition_efer(vmx))
		move_msr_up(vmx, index, save_nmsrs++);

	vmx->save_nmsrs = save_nmsrs;

	if (cpu_has_vmx_msr_bitmap()) {
		if (is_long_mode(&vmx->vcpu))
			msr_bitmap = vmx_msr_bitmap_longmode;
		else
			msr_bitmap = vmx_msr_bitmap_legacy;

		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
	}
}
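/*
 * setup_msrs() thus compacts the MSRs that actually need switching to the
 * front of guest_msrs[]: save_nmsrs ends up as the count of live entries,
 * and anything past it is never touched on the entry/exit path.
 */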

/*
 * reads and returns guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset    -- 21.3
 */
static u64 guest_read_tsc(void)
{
	u64 host_tsc, tsc_offset;

	rdtscll(host_tsc);
	tsc_offset = vmcs_read64(TSC_OFFSET);
	return host_tsc + tsc_offset;
}

/*
 * writes 'guest_tsc' into guest's timestamp counter "register"
 * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
 */
static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
{
	vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
}

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	u64 data;
	struct shared_msr_entry *msr;

	if (!pdata) {
		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
		return -EINVAL;
	}

	switch (msr_index) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		data = vmcs_readl(GUEST_FS_BASE);
		break;
	case MSR_GS_BASE:
		data = vmcs_readl(GUEST_GS_BASE);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(to_vmx(vcpu));
		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
		break;
#endif
	case MSR_EFER:
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	case MSR_IA32_TSC:
		data = guest_read_tsc();
		break;
	case MSR_IA32_SYSENTER_CS:
		data = vmcs_read32(GUEST_SYSENTER_CS);
		break;
	case MSR_IA32_SYSENTER_EIP:
		data = vmcs_readl(GUEST_SYSENTER_EIP);
		break;
	case MSR_IA32_SYSENTER_ESP:
		data = vmcs_readl(GUEST_SYSENTER_ESP);
		break;
	default:
		vmx_load_host_state(to_vmx(vcpu));
		msr = find_msr_entry(to_vmx(vcpu), msr_index);
		if (msr) {
			vmx_load_host_state(to_vmx(vcpu));
			data = msr->data;
			break;
		}
		return kvm_get_msr_common(vcpu, msr_index, pdata);
	}

	*pdata = data;
	return 0;
}

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr;
	u64 host_tsc;
	int ret = 0;

	switch (msr_index) {
	case MSR_EFER:
		vmx_load_host_state(vmx);
		ret = kvm_set_msr_common(vcpu, msr_index, data);
		break;
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		vmcs_writel(GUEST_FS_BASE, data);
		break;
	case MSR_GS_BASE:
		vmcs_writel(GUEST_GS_BASE, data);
		break;
	case MSR_KERNEL_GS_BASE:
		vmx_load_host_state(vmx);
		vmx->msr_guest_kernel_gs_base = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		vmcs_write32(GUEST_SYSENTER_CS, data);
		break;
	case MSR_IA32_SYSENTER_EIP:
		vmcs_writel(GUEST_SYSENTER_EIP, data);
		break;
	case MSR_IA32_SYSENTER_ESP:
		vmcs_writel(GUEST_SYSENTER_ESP, data);
		break;
	case MSR_IA32_TSC:
		rdtscll(host_tsc);
		guest_write_tsc(data, host_tsc);
		break;
	case MSR_IA32_CR_PAT:
		if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
			vmcs_write64(GUEST_IA32_PAT, data);
			vcpu->arch.pat = data;
			break;
		}
		/* Otherwise falls through to kvm_set_msr_common */
	default:
		msr = find_msr_entry(vmx, msr_index);
		if (msr) {
			vmx_load_host_state(vmx);
			msr->data = data;
			break;
		}
		ret = kvm_set_msr_common(vcpu, msr_index, data);
	}

	return ret;
}

static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	switch (reg) {
	case VCPU_REGS_RSP:
		vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
		break;
	case VCPU_REGS_RIP:
		vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
		break;
	case VCPU_EXREG_PDPTR:
		if (enable_ept)
			ept_save_pdptrs(vcpu);
		break;
	default:
		break;
	}
}

static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
	else
		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);

	update_exception_bitmap(vcpu);
}

static __init int cpu_has_kvm_support(void)
{
	return cpu_has_vmx();
}

static __init int vmx_disabled_by_bios(void)
{
	u64 msr;

	rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
	return (msr & (FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED))
	    == FEATURE_CONTROL_LOCKED;
	/* locked but not enabled */
}

static int hardware_enable(void *garbage)
{
	int cpu = raw_smp_processor_id();
	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
	u64 old;

	if (read_cr4() & X86_CR4_VMXE)
		return -EBUSY;

	INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
	rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
	if ((old & (FEATURE_CONTROL_LOCKED |
		    FEATURE_CONTROL_VMXON_ENABLED))
	    != (FEATURE_CONTROL_LOCKED |
		FEATURE_CONTROL_VMXON_ENABLED))
		/* enable and lock */
		wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
		       FEATURE_CONTROL_LOCKED |
		       FEATURE_CONTROL_VMXON_ENABLED);
	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
	asm volatile (ASM_VMX_VMXON_RAX
		      : : "a"(&phys_addr), "m"(phys_addr)
		      : "memory", "cc");

	ept_sync_global();

	return 0;
}
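/*
 * The ordering above is architectural: the feature-control MSR must be
 * programmed (and locked) before setting CR4.VMXE, and VMXON requires
 * both, taking the physical address of the per-cpu VMXON region as its
 * memory operand.
 */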

static void vmclear_local_vcpus(void)
{
	int cpu = raw_smp_processor_id();
	struct vcpu_vmx *vmx, *n;

	list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
				 local_vcpus_link)
		__vcpu_clear(vmx);
}


/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
static void kvm_cpu_vmxoff(void)
{
	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	write_cr4(read_cr4() & ~X86_CR4_VMXE);
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();
	kvm_cpu_vmxoff();
}

static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
				      u32 msr, u32 *result)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 ctl = ctl_min | ctl_opt;

	rdmsr(msr, vmx_msr_low, vmx_msr_high);

	ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
	ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */

	/* Ensure minimum (required) set of control bits are supported. */
	if (ctl_min & ~ctl)
		return -EIO;

	*result = ctl;
	return 0;
}
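/*
 * Each VMX capability MSR reports "allowed-0" settings in its low 32 bits
 * and "allowed-1" settings in its high 32 bits.  Starting from the union
 * of required and optional bits, the AND with the high word drops optional
 * bits the CPU cannot set, the OR with the low word forces bits the CPU
 * insists on, and losing a required bit in the process fails the setup.
 */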

static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
	u32 vmx_msr_low, vmx_msr_high;
	u32 min, opt, min2, opt2;
	u32 _pin_based_exec_control = 0;
	u32 _cpu_based_exec_control = 0;
	u32 _cpu_based_2nd_exec_control = 0;
	u32 _vmexit_control = 0;
	u32 _vmentry_control = 0;

	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
	opt = PIN_BASED_VIRTUAL_NMIS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
				&_pin_based_exec_control) < 0)
		return -EIO;

	min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
	      CPU_BASED_CR8_LOAD_EXITING |
	      CPU_BASED_CR8_STORE_EXITING |
#endif
	      CPU_BASED_CR3_LOAD_EXITING |
	      CPU_BASED_CR3_STORE_EXITING |
	      CPU_BASED_USE_IO_BITMAPS |
	      CPU_BASED_MOV_DR_EXITING |
	      CPU_BASED_USE_TSC_OFFSETING |
	      CPU_BASED_INVLPG_EXITING;
	opt = CPU_BASED_TPR_SHADOW |
	      CPU_BASED_USE_MSR_BITMAPS |
	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
				&_cpu_based_exec_control) < 0)
		return -EIO;
#ifdef CONFIG_X86_64
	if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
		_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
					   ~CPU_BASED_CR8_STORE_EXITING;
#endif
	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
		min2 = 0;
		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			SECONDARY_EXEC_WBINVD_EXITING |
			SECONDARY_EXEC_ENABLE_VPID |
			SECONDARY_EXEC_ENABLE_EPT |
			SECONDARY_EXEC_UNRESTRICTED_GUEST |
			SECONDARY_EXEC_PAUSE_LOOP_EXITING;
		if (adjust_vmx_controls(min2, opt2,
					MSR_IA32_VMX_PROCBASED_CTLS2,
					&_cpu_based_2nd_exec_control) < 0)
			return -EIO;
	}
#ifndef CONFIG_X86_64
	if (!(_cpu_based_2nd_exec_control &
				SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
		/* CR3 accesses and invlpg don't need to cause VM Exits when
		   EPT enabled */
		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
					     CPU_BASED_CR3_STORE_EXITING |
					     CPU_BASED_INVLPG_EXITING);
		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
		      vmx_capability.ept, vmx_capability.vpid);
	}

	min = 0;
#ifdef CONFIG_X86_64
	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
				&_vmexit_control) < 0)
		return -EIO;

	min = 0;
	opt = VM_ENTRY_LOAD_IA32_PAT;
	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
				&_vmentry_control) < 0)
		return -EIO;

	rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);

	/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
	if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
		return -EIO;

#ifdef CONFIG_X86_64
	/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
	if (vmx_msr_high & (1u<<16))
		return -EIO;
#endif

	/* Require Write-Back (WB) memory type for VMCS accesses. */
	if (((vmx_msr_high >> 18) & 15) != 6)
		return -EIO;

	vmcs_conf->size = vmx_msr_high & 0x1fff;
	vmcs_conf->order = get_order(vmcs_config.size);
	vmcs_conf->revision_id = vmx_msr_low;

	vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
	vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
	vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
	vmcs_conf->vmexit_ctrl         = _vmexit_control;
	vmcs_conf->vmentry_ctrl        = _vmentry_control;

	return 0;
}

static struct vmcs *alloc_vmcs_cpu(int cpu)
{
	int node = cpu_to_node(cpu);
	struct page *pages;
	struct vmcs *vmcs;

	pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
	if (!pages)
		return NULL;
	vmcs = page_address(pages);
	memset(vmcs, 0, vmcs_config.size);
	vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
	return vmcs;
}

static struct vmcs *alloc_vmcs(void)
{
	return alloc_vmcs_cpu(raw_smp_processor_id());
}

static void free_vmcs(struct vmcs *vmcs)
{
	free_pages((unsigned long)vmcs, vmcs_config.order);
}

static void free_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		free_vmcs(per_cpu(vmxarea, cpu));
		per_cpu(vmxarea, cpu) = NULL;
	}
}

static __init int alloc_kvm_area(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct vmcs *vmcs;

		vmcs = alloc_vmcs_cpu(cpu);
		if (!vmcs) {
			free_kvm_area();
			return -ENOMEM;
		}

		per_cpu(vmxarea, cpu) = vmcs;
	}
	return 0;
}

static __init int hardware_setup(void)
{
	if (setup_vmcs_config(&vmcs_config) < 0)
		return -EIO;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (!cpu_has_vmx_vpid())
		enable_vpid = 0;

	if (!cpu_has_vmx_ept()) {
		enable_ept = 0;
		enable_unrestricted_guest = 0;
	}

	if (!cpu_has_vmx_unrestricted_guest())
		enable_unrestricted_guest = 0;

	if (!cpu_has_vmx_flexpriority())
		flexpriority_enabled = 0;

	if (!cpu_has_vmx_tpr_shadow())
		kvm_x86_ops->update_cr8_intercept = NULL;

	if (enable_ept && !cpu_has_vmx_ept_2m_page())
		kvm_disable_largepages();

	if (!cpu_has_vmx_ple())
		ple_gap = 0;

	return alloc_kvm_area();
}

static __exit void hardware_unsetup(void)
{
	free_kvm_area();
}

static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
		vmcs_write16(sf->selector, save->selector);
		vmcs_writel(sf->base, save->base);
		vmcs_write32(sf->limit, save->limit);
		vmcs_write32(sf->ar_bytes, save->ar);
	} else {
		u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
			<< AR_DPL_SHIFT;
		vmcs_write32(sf->ar_bytes, 0x93 | dpl);
	}
}

static void enter_pmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 0;

	vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
	vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
	vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);

	flags = vmcs_readl(GUEST_RFLAGS);
	flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
	flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
	vmcs_writel(GUEST_RFLAGS, flags);

	vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
			(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));

	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		return;

	fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);

	vmcs_write16(GUEST_SS_SELECTOR, 0);
	vmcs_write32(GUEST_SS_AR_BYTES, 0x93);

	vmcs_write16(GUEST_CS_SELECTOR,
		     vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
	vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
}

static gva_t rmode_tss_base(struct kvm *kvm)
{
	if (!kvm->arch.tss_addr) {
		gfn_t base_gfn = kvm->memslots[0].base_gfn +
				 kvm->memslots[0].npages - 3;
		return base_gfn << PAGE_SHIFT;
	}
	return kvm->arch.tss_addr;
}

static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	save->selector = vmcs_read16(sf->selector);
	save->base = vmcs_readl(sf->base);
	save->limit = vmcs_read32(sf->limit);
	save->ar = vmcs_read32(sf->ar_bytes);
	vmcs_write16(sf->selector, save->base >> 4);
	vmcs_write32(sf->base, save->base & 0xfffff);
	vmcs_write32(sf->limit, 0xffff);
	vmcs_write32(sf->ar_bytes, 0xf3);
}
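/*
 * In vm86 mode the hardware computes a segment base as selector << 4, so
 * the saved protected-mode base is converted the other way (base >> 4)
 * and truncated to the 20-bit real-mode address space; 0xf3 encodes the
 * present, DPL-3, writable data segment that vm86 expects.
 */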

static void enter_rmode(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_unrestricted_guest)
		return;

	vmx->emulation_required = 1;
	vmx->rmode.vm86_active = 1;

	vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
	vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));

	vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
	vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);

	vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
	vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);

	flags = vmcs_readl(GUEST_RFLAGS);
	vmx->rmode.save_iopl
		= (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

	vmcs_writel(GUEST_RFLAGS, flags);
	vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
	update_exception_bitmap(vcpu);

	if (emulate_invalid_guest_state)
		goto continue_rmode;

	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
		vmcs_writel(GUEST_CS_BASE, 0xf0000);
	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);

continue_rmode:
	kvm_mmu_reset_context(vcpu);
	init_rmode(vcpu->kvm);
}

static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);

	if (!msr)
		return;

	/*
	 * Force kernel_gs_base reloading before EFER changes, as control
	 * of this msr depends on is_long_mode().
	 */
	vmx_load_host_state(to_vmx(vcpu));
	vcpu->arch.shadow_efer = efer;
	if (!msr)
		return;
	if (efer & EFER_LMA) {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) |
			     VM_ENTRY_IA32E_MODE);
		msr->data = efer;
	} else {
		vmcs_write32(VM_ENTRY_CONTROLS,
			     vmcs_read32(VM_ENTRY_CONTROLS) &
			     ~VM_ENTRY_IA32E_MODE);

		msr->data = efer & ~EFER_LME;
	}
	setup_msrs(vmx);
}

#ifdef CONFIG_X86_64

static void enter_lmode(struct kvm_vcpu *vcpu)
{
	u32 guest_tr_ar;

	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
		printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
		       __func__);
		vmcs_write32(GUEST_TR_AR_BYTES,
			     (guest_tr_ar & ~AR_TYPE_MASK)
			     | AR_TYPE_BUSY_64_TSS);
	}
	vcpu->arch.shadow_efer |= EFER_LMA;
	vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
}

static void exit_lmode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_efer &= ~EFER_LMA;

	vmcs_write32(VM_ENTRY_CONTROLS,
		     vmcs_read32(VM_ENTRY_CONTROLS)
		     & ~VM_ENTRY_IA32E_MODE);
}

#endif

static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
	vpid_sync_vcpu_all(to_vmx(vcpu));
	if (enable_ept)
		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
}

static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
	vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
}

1610
1439442c
SY
1611static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1612{
6de4f3ad
AK
1613 if (!test_bit(VCPU_EXREG_PDPTR,
1614 (unsigned long *)&vcpu->arch.regs_dirty))
1615 return;
1616
1439442c 1617 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1439442c
SY
1618 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
1619 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
1620 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
1621 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
1622 }
1623}
1624
8f5d549f
AK
1625static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1626{
1627 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1628 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1629 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1630 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1631 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1632 }
6de4f3ad
AK
1633
1634 __set_bit(VCPU_EXREG_PDPTR,
1635 (unsigned long *)&vcpu->arch.regs_avail);
1636 __set_bit(VCPU_EXREG_PDPTR,
1637 (unsigned long *)&vcpu->arch.regs_dirty);
8f5d549f
AK
1638}
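/*
 * With EPT and a PAE guest, the four PDPTRs are loaded from the VMCS on
 * VM entry rather than from guest memory, so KVM shuttles them between
 * vcpu->arch.pdptrs[] and the VMCS explicitly, using the regs_avail and
 * regs_dirty bits to avoid redundant VMREADs and VMWRITEs.
 */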

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);

static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
					unsigned long cr0,
					struct kvm_vcpu *vcpu)
{
	if (!(cr0 & X86_CR0_PG)) {
		/* From paging/starting to nonpaging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
			     (CPU_BASED_CR3_LOAD_EXITING |
			      CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
	} else if (!is_paging(vcpu)) {
		/* From nonpaging to paging */
		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
			     ~(CPU_BASED_CR3_LOAD_EXITING |
			       CPU_BASED_CR3_STORE_EXITING));
		vcpu->arch.cr0 = cr0;
		vmx_set_cr4(vcpu, vcpu->arch.cr4);
	}

	if (!(cr0 & X86_CR0_WP))
		*hw_cr0 &= ~X86_CR0_WP;
}

static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
					struct kvm_vcpu *vcpu)
{
	if (!is_paging(vcpu)) {
		*hw_cr4 &= ~X86_CR4_PAE;
		*hw_cr4 |= X86_CR4_PSE;
	} else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
		*hw_cr4 &= ~X86_CR4_PAE;
}

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	unsigned long hw_cr0;

	if (enable_unrestricted_guest)
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
			| KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
	else
		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;

	vmx_fpu_deactivate(vcpu);

	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
		enter_pmode(vcpu);

	if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
		enter_rmode(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif

	if (enable_ept)
		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);

	vmcs_writel(CR0_READ_SHADOW, cr0);
	vmcs_writel(GUEST_CR0, hw_cr0);
	vcpu->arch.cr0 = cr0;

	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
		vmx_fpu_activate(vcpu);
}
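/*
 * Two CR0 images are kept: GUEST_CR0 is what the hardware actually runs
 * with (always-on bits forced in), while CR0_READ_SHADOW is what the
 * guest believes it wrote, and is what a guest "mov from cr0" returns
 * for the shadowed bits.
 */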

static u64 construct_eptp(unsigned long root_hpa)
{
	u64 eptp;

	/* TODO write the value reading from MSR */
	eptp = VMX_EPT_DEFAULT_MT |
		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
	eptp |= (root_hpa & PAGE_MASK);

	return eptp;
}

static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	unsigned long guest_cr3;
	u64 eptp;

	guest_cr3 = cr3;
	if (enable_ept) {
		eptp = construct_eptp(cr3);
		vmcs_write64(EPT_POINTER, eptp);
		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
			vcpu->kvm->arch.ept_identity_map_addr;
	}

	vmx_flush_tlb(vcpu);
	vmcs_writel(GUEST_CR3, guest_cr3);
	if (vcpu->arch.cr0 & X86_CR0_PE)
		vmx_fpu_deactivate(vcpu);
}

static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

	vcpu->arch.cr4 = cr4;
	if (enable_ept)
		ept_update_paging_mode_cr4(&hw_cr4, vcpu);

	vmcs_writel(CR4_READ_SHADOW, cr4);
	vmcs_writel(GUEST_CR4, hw_cr4);
}

static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];

	return vmcs_readl(sf->base);
}

static void vmx_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
	u32 ar;

	var->base = vmcs_readl(sf->base);
	var->limit = vmcs_read32(sf->limit);
	var->selector = vmcs_read16(sf->selector);
	ar = vmcs_read32(sf->ar_bytes);
	if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
		ar = 0;
	var->type = ar & 15;
	var->s = (ar >> 4) & 1;
	var->dpl = (ar >> 5) & 3;
	var->present = (ar >> 7) & 1;
	var->avl = (ar >> 12) & 1;
	var->l = (ar >> 13) & 1;
	var->db = (ar >> 14) & 1;
	var->g = (ar >> 15) & 1;
	var->unusable = (ar >> 16) & 1;
}

2e4d2653
IE
1791static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1792{
2e4d2653
IE
1793 if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1794 return 0;
1795
1796 if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1797 return 3;
1798
eab4b8aa 1799 return vmcs_read16(GUEST_CS_SELECTOR) & 3;
2e4d2653
IE
1800}
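/*
 * Note (illustrative): outside real mode and vm86 mode the CPL is
 * just the RPL bits of CS; e.g. a guest running 64-bit userspace
 * with CS == 0x33 reports CPL 3, while kernel mode with CS == 0x10
 * reports CPL 0.
 */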
1801
653e3108 1802static u32 vmx_segment_access_rights(struct kvm_segment *var)
6aa8b732 1803{
6aa8b732
AK
1804 u32 ar;
1805
653e3108 1806 if (var->unusable)
6aa8b732
AK
1807 ar = 1 << 16;
1808 else {
1809 ar = var->type & 15;
1810 ar |= (var->s & 1) << 4;
1811 ar |= (var->dpl & 3) << 5;
1812 ar |= (var->present & 1) << 7;
1813 ar |= (var->avl & 1) << 12;
1814 ar |= (var->l & 1) << 13;
1815 ar |= (var->db & 1) << 14;
1816 ar |= (var->g & 1) << 15;
1817 }
f7fbf1fd
UL
1818 if (ar == 0) /* a 0 value means unusable */
1819 ar = AR_UNUSABLE_MASK;
653e3108
AK
1820
1821 return ar;
1822}
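/*
 * Worked example (added for illustration): a flat 32-bit code
 * segment (type 0xb, s=1, dpl=0, present, db=1, g=1) packs to
 * 0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15) == 0xc09b.
 * The layout mirrors the VMX access-rights field: bits 3:0 type,
 * bit 4 S, bits 6:5 DPL, bit 7 P, bit 12 AVL, bit 13 L,
 * bit 14 D/B, bit 15 G, bit 16 unusable.
 */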
1823
1824static void vmx_set_segment(struct kvm_vcpu *vcpu,
1825 struct kvm_segment *var, int seg)
1826{
7ffd92c5 1827 struct vcpu_vmx *vmx = to_vmx(vcpu);
653e3108
AK
1828 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1829 u32 ar;
1830
7ffd92c5
AK
1831 if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
1832 vmx->rmode.tr.selector = var->selector;
1833 vmx->rmode.tr.base = var->base;
1834 vmx->rmode.tr.limit = var->limit;
1835 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
653e3108
AK
1836 return;
1837 }
1838 vmcs_writel(sf->base, var->base);
1839 vmcs_write32(sf->limit, var->limit);
1840 vmcs_write16(sf->selector, var->selector);
7ffd92c5 1841 if (vmx->rmode.vm86_active && var->s) {
653e3108
AK
1842 /*
1843 * Hack real-mode segments into vm86 compatibility.
1844 */
1845 if (var->base == 0xffff0000 && var->selector == 0xf000)
1846 vmcs_writel(sf->base, 0xf0000);
1847 ar = 0xf3;
1848 } else
1849 ar = vmx_segment_access_rights(var);
3a624e29
NK
1850
1851 /*
1852 * Fix the "Accessed" bit in AR field of segment registers for older
1853 * qemu binaries.
1854 * The IA-32 architecture specifies that, at processor reset, the
1855 * "Accessed" bit in the AR field of segment registers is 1. Qemu
1856 * sets it to 0 in its userland code, which causes an
1857 * invalid-guest-state vmexit when "unrestricted guest" mode is on.
1858 * A fix for this cpu_reset setup issue has been pushed to the qemu
1859 * tree; newer qemu binaries with that fix will not need this
1860 * kvm hack.
1861 */
1862 if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
1863 ar |= 0x1; /* Accessed */
1864
6aa8b732
AK
1865 vmcs_write32(sf->ar_bytes, ar);
1866}
1867
6aa8b732
AK
1868static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1869{
1870 u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1871
1872 *db = (ar >> 14) & 1;
1873 *l = (ar >> 13) & 1;
1874}
1875
1876static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1877{
1878 dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1879 dt->base = vmcs_readl(GUEST_IDTR_BASE);
1880}
1881
1882static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1883{
1884 vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1885 vmcs_writel(GUEST_IDTR_BASE, dt->base);
1886}
1887
1888static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1889{
1890 dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1891 dt->base = vmcs_readl(GUEST_GDTR_BASE);
1892}
1893
1894static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1895{
1896 vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1897 vmcs_writel(GUEST_GDTR_BASE, dt->base);
1898}
1899
648dfaa7
MG
1900static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
1901{
1902 struct kvm_segment var;
1903 u32 ar;
1904
1905 vmx_get_segment(vcpu, &var, seg);
1906 ar = vmx_segment_access_rights(&var);
1907
1908 if (var.base != (var.selector << 4))
1909 return false;
1910 if (var.limit != 0xffff)
1911 return false;
1912 if (ar != 0xf3)
1913 return false;
1914
1915 return true;
1916}
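/*
 * Example (illustrative): a segment register loaded with selector
 * 0x1234 in real mode must show base 0x12340 (selector << 4),
 * limit 0xffff and access rights 0xf3 (present, DPL 3, read/write
 * data, accessed) to pass this check.
 */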
1917
1918static bool code_segment_valid(struct kvm_vcpu *vcpu)
1919{
1920 struct kvm_segment cs;
1921 unsigned int cs_rpl;
1922
1923 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1924 cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1925
1872a3f4
AK
1926 if (cs.unusable)
1927 return false;
648dfaa7
MG
1928 if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1929 return false;
1930 if (!cs.s)
1931 return false;
1872a3f4 1932 if (cs.type & AR_TYPE_WRITEABLE_MASK) {
648dfaa7
MG
1933 if (cs.dpl > cs_rpl)
1934 return false;
1872a3f4 1935 } else {
648dfaa7
MG
1936 if (cs.dpl != cs_rpl)
1937 return false;
1938 }
1939 if (!cs.present)
1940 return false;
1941
1942 /* TODO: Add a reserved-field check; this will require a new member in the kvm_segment_field structure */
1943 return true;
1944}
1945
1946static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1947{
1948 struct kvm_segment ss;
1949 unsigned int ss_rpl;
1950
1951 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1952 ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1953
1872a3f4
AK
1954 if (ss.unusable)
1955 return true;
1956 if (ss.type != 3 && ss.type != 7)
648dfaa7
MG
1957 return false;
1958 if (!ss.s)
1959 return false;
1960 if (ss.dpl != ss_rpl) /* DPL != RPL */
1961 return false;
1962 if (!ss.present)
1963 return false;
1964
1965 return true;
1966}
1967
1968static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
1969{
1970 struct kvm_segment var;
1971 unsigned int rpl;
1972
1973 vmx_get_segment(vcpu, &var, seg);
1974 rpl = var.selector & SELECTOR_RPL_MASK;
1975
1872a3f4
AK
1976 if (var.unusable)
1977 return true;
648dfaa7
MG
1978 if (!var.s)
1979 return false;
1980 if (!var.present)
1981 return false;
1982 if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
1983 if (var.dpl < rpl) /* DPL < RPL */
1984 return false;
1985 }
1986
1987 /* TODO: Add other members to kvm_segment_field to allow checking
1988 * for other access-rights flags
1989 */
1990 return true;
1991}
1992
1993static bool tr_valid(struct kvm_vcpu *vcpu)
1994{
1995 struct kvm_segment tr;
1996
1997 vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
1998
1872a3f4
AK
1999 if (tr.unusable)
2000 return false;
648dfaa7
MG
2001 if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */
2002 return false;
1872a3f4 2003 if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
648dfaa7
MG
2004 return false;
2005 if (!tr.present)
2006 return false;
2007
2008 return true;
2009}
2010
2011static bool ldtr_valid(struct kvm_vcpu *vcpu)
2012{
2013 struct kvm_segment ldtr;
2014
2015 vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
2016
1872a3f4
AK
2017 if (ldtr.unusable)
2018 return true;
648dfaa7
MG
2019 if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */
2020 return false;
2021 if (ldtr.type != 2)
2022 return false;
2023 if (!ldtr.present)
2024 return false;
2025
2026 return true;
2027}
2028
2029static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
2030{
2031 struct kvm_segment cs, ss;
2032
2033 vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2034 vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2035
2036 return ((cs.selector & SELECTOR_RPL_MASK) ==
2037 (ss.selector & SELECTOR_RPL_MASK));
2038}
2039
2040 /*
2041 * Check whether the guest state is valid. Returns true if valid,
2042 * false if not.
2043 * We assume that registers are always usable.
2044 */
2045static bool guest_state_valid(struct kvm_vcpu *vcpu)
2046{
2047 /* real mode guest state checks */
2048 if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
2049 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
2050 return false;
2051 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
2052 return false;
2053 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
2054 return false;
2055 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
2056 return false;
2057 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
2058 return false;
2059 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
2060 return false;
2061 } else {
2062 /* protected mode guest state checks */
2063 if (!cs_ss_rpl_check(vcpu))
2064 return false;
2065 if (!code_segment_valid(vcpu))
2066 return false;
2067 if (!stack_segment_valid(vcpu))
2068 return false;
2069 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
2070 return false;
2071 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
2072 return false;
2073 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
2074 return false;
2075 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
2076 return false;
2077 if (!tr_valid(vcpu))
2078 return false;
2079 if (!ldtr_valid(vcpu))
2080 return false;
2081 }
2082 /* TODO:
2083 * - Add checks on RIP
2084 * - Add checks on RFLAGS
2085 */
2086
2087 return true;
2088}
2089
d77c26fc 2090static int init_rmode_tss(struct kvm *kvm)
6aa8b732 2091{
6aa8b732 2092 gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
195aefde 2093 u16 data = 0;
10589a46 2094 int ret = 0;
195aefde 2095 int r;
6aa8b732 2096
195aefde
IE
2097 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2098 if (r < 0)
10589a46 2099 goto out;
195aefde 2100 data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
464d17c8
SY
2101 r = kvm_write_guest_page(kvm, fn++, &data,
2102 TSS_IOPB_BASE_OFFSET, sizeof(u16));
195aefde 2103 if (r < 0)
10589a46 2104 goto out;
195aefde
IE
2105 r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
2106 if (r < 0)
10589a46 2107 goto out;
195aefde
IE
2108 r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2109 if (r < 0)
10589a46 2110 goto out;
195aefde 2111 data = ~0;
10589a46
MT
2112 r = kvm_write_guest_page(kvm, fn, &data,
2113 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
2114 sizeof(u8));
195aefde 2115 if (r < 0)
10589a46
MT
2116 goto out;
2117
2118 ret = 1;
2119out:
10589a46 2120 return ret;
6aa8b732
AK
2121}
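/*
 * Layout note (illustrative, assuming the usual kvm constants
 * TSS_BASE_SIZE 0x68 and TSS_REDIRECTION_SIZE 0x20): the word
 * written at TSS_IOPB_BASE_OFFSET points the I/O bitmap past the
 * interrupt-redirection map, and the trailing 0xff byte terminates
 * the I/O permission bitmap as the architecture requires, so vm86
 * guests get faithful I/O and software-interrupt checking.
 */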
2122
b7ebfb05
SY
2123static int init_rmode_identity_map(struct kvm *kvm)
2124{
2125 int i, r, ret;
2126 pfn_t identity_map_pfn;
2127 u32 tmp;
2128
089d034e 2129 if (!enable_ept)
b7ebfb05
SY
2130 return 1;
2131 if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2132 printk(KERN_ERR "EPT: identity-mapping pagetable "
2133 "hasn't been allocated!\n");
2134 return 0;
2135 }
2136 if (likely(kvm->arch.ept_identity_pagetable_done))
2137 return 1;
2138 ret = 0;
b927a3ce 2139 identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
b7ebfb05
SY
2140 r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2141 if (r < 0)
2142 goto out;
2143 /* Set up identity-mapping pagetable for EPT in real mode */
2144 for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2145 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2146 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2147 r = kvm_write_guest_page(kvm, identity_map_pfn,
2148 &tmp, i * sizeof(tmp), sizeof(tmp));
2149 if (r < 0)
2150 goto out;
2151 }
2152 kvm->arch.ept_identity_pagetable_done = true;
2153 ret = 1;
2154out:
2155 return ret;
2156}
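/*
 * Note (illustrative): each of the 1024 PDEs written above maps a
 * 4 MB PSE page at (i << 22); entry 0 covers 0x000000-0x3fffff,
 * entry 1 covers 0x400000-0x7fffff, and so on, so the single page
 * identity-maps the entire 4 GB the guest can address before it
 * enables paging.
 */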
2157
6aa8b732
AK
2158static void seg_setup(int seg)
2159{
2160 struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3a624e29 2161 unsigned int ar;
6aa8b732
AK
2162
2163 vmcs_write16(sf->selector, 0);
2164 vmcs_writel(sf->base, 0);
2165 vmcs_write32(sf->limit, 0xffff);
3a624e29
NK
2166 if (enable_unrestricted_guest) {
2167 ar = 0x93;
2168 if (seg == VCPU_SREG_CS)
2169 ar |= 0x08; /* code segment */
2170 } else
2171 ar = 0xf3;
2172
2173 vmcs_write32(sf->ar_bytes, ar);
6aa8b732
AK
2174}
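/*
 * Note (illustrative): 0x93 is a present, accessed, read/write
 * data segment with DPL 0, matching what a real CPU reports after
 * reset; for CS the extra 0x08 turns it into an execute/read code
 * segment (0x9b). The non-unrestricted fallback uses 0xf3 (DPL 3)
 * because those guests run in virtual-8086 mode at CPL 3.
 */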
2175
f78e0e2e
SY
2176static int alloc_apic_access_page(struct kvm *kvm)
2177{
2178 struct kvm_userspace_memory_region kvm_userspace_mem;
2179 int r = 0;
2180
72dc67a6 2181 down_write(&kvm->slots_lock);
bfc6d222 2182 if (kvm->arch.apic_access_page)
f78e0e2e
SY
2183 goto out;
2184 kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2185 kvm_userspace_mem.flags = 0;
2186 kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2187 kvm_userspace_mem.memory_size = PAGE_SIZE;
2188 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2189 if (r)
2190 goto out;
72dc67a6 2191
bfc6d222 2192 kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
f78e0e2e 2193out:
72dc67a6 2194 up_write(&kvm->slots_lock);
f78e0e2e
SY
2195 return r;
2196}
2197
b7ebfb05
SY
2198static int alloc_identity_pagetable(struct kvm *kvm)
2199{
2200 struct kvm_userspace_memory_region kvm_userspace_mem;
2201 int r = 0;
2202
2203 down_write(&kvm->slots_lock);
2204 if (kvm->arch.ept_identity_pagetable)
2205 goto out;
2206 kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2207 kvm_userspace_mem.flags = 0;
b927a3ce
SY
2208 kvm_userspace_mem.guest_phys_addr =
2209 kvm->arch.ept_identity_map_addr;
b7ebfb05
SY
2210 kvm_userspace_mem.memory_size = PAGE_SIZE;
2211 r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2212 if (r)
2213 goto out;
2214
b7ebfb05 2215 kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
b927a3ce 2216 kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
b7ebfb05
SY
2217out:
2218 up_write(&kvm->slots_lock);
2219 return r;
2220}
2221
2384d2b3
SY
2222static void allocate_vpid(struct vcpu_vmx *vmx)
2223{
2224 int vpid;
2225
2226 vmx->vpid = 0;
919818ab 2227 if (!enable_vpid)
2384d2b3
SY
2228 return;
2229 spin_lock(&vmx_vpid_lock);
2230 vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2231 if (vpid < VMX_NR_VPIDS) {
2232 vmx->vpid = vpid;
2233 __set_bit(vpid, vmx_vpid_bitmap);
2234 }
2235 spin_unlock(&vmx_vpid_lock);
2236}
2237
5897297b 2238static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
25c5f225 2239{
3e7c73e9 2240 int f = sizeof(unsigned long);
25c5f225
SY
2241
2242 if (!cpu_has_vmx_msr_bitmap())
2243 return;
2244
2245 /*
2246 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2247 * have the write-low and read-high bitmap offsets the wrong way round.
2248 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2249 */
25c5f225 2250 if (msr <= 0x1fff) {
3e7c73e9
AK
2251 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2252 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
25c5f225
SY
2253 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2254 msr &= 0x1fff;
3e7c73e9
AK
2255 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2256 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
25c5f225 2257 }
25c5f225
SY
2258}
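/*
 * Sketch (added for illustration, not in the original file): the
 * inverse operation would set the same bits this function clears.
 * For example, re-intercepting MSR_STAR (0xc0000081) masks it to
 * 0x81 and sets bit 0x81 in the read-high (0x400) and write-high
 * (0xc00) regions of the 4 KB bitmap page.
 */
#if 0
static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff) {
		__set_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
		__set_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		__set_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
		__set_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
	}
}
#endif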
2259
5897297b
AK
2260static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2261{
2262 if (!longmode_only)
2263 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2264 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2265}
2266
6aa8b732
AK
2267/*
2268 * Sets up the vmcs for emulated real mode.
2269 */
8b9cf98c 2270static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
6aa8b732 2271{
468d472f 2272 u32 host_sysenter_cs, msr_low, msr_high;
6aa8b732 2273 u32 junk;
53f658b3 2274 u64 host_pat, tsc_this, tsc_base;
6aa8b732
AK
2275 unsigned long a;
2276 struct descriptor_table dt;
2277 int i;
cd2276a7 2278 unsigned long kvm_vmx_return;
6e5d865c 2279 u32 exec_control;
6aa8b732 2280
6aa8b732 2281 /* I/O */
3e7c73e9
AK
2282 vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2283 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
6aa8b732 2284
25c5f225 2285 if (cpu_has_vmx_msr_bitmap())
5897297b 2286 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
25c5f225 2287
6aa8b732
AK
2288 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2289
6aa8b732 2290 /* Control */
1c3d14fe
YS
2291 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2292 vmcs_config.pin_based_exec_ctrl);
6e5d865c
YS
2293
2294 exec_control = vmcs_config.cpu_based_exec_ctrl;
2295 if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2296 exec_control &= ~CPU_BASED_TPR_SHADOW;
2297#ifdef CONFIG_X86_64
2298 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2299 CPU_BASED_CR8_LOAD_EXITING;
2300#endif
2301 }
089d034e 2302 if (!enable_ept)
d56f546d 2303 exec_control |= CPU_BASED_CR3_STORE_EXITING |
83dbc83a
MT
2304 CPU_BASED_CR3_LOAD_EXITING |
2305 CPU_BASED_INVLPG_EXITING;
6e5d865c 2306 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
6aa8b732 2307
83ff3b9d
SY
2308 if (cpu_has_secondary_exec_ctrls()) {
2309 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2310 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2311 exec_control &=
2312 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2384d2b3
SY
2313 if (vmx->vpid == 0)
2314 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
089d034e 2315 if (!enable_ept)
d56f546d 2316 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
3a624e29
NK
2317 if (!enable_unrestricted_guest)
2318 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
4b8d54f9
ZE
2319 if (!ple_gap)
2320 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
83ff3b9d
SY
2321 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2322 }
f78e0e2e 2323
4b8d54f9
ZE
2324 if (ple_gap) {
2325 vmcs_write32(PLE_GAP, ple_gap);
2326 vmcs_write32(PLE_WINDOW, ple_window);
2327 }
2328
c7addb90
AK
2329 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2330 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
6aa8b732
AK
2331 vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
2332
2333 vmcs_writel(HOST_CR0, read_cr0()); /* 22.2.3 */
2334 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
2335 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
2336
2337 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
2338 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
2339 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
d6e88aec
AK
2340 vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
2341 vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
6aa8b732 2342 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
05b3e0c2 2343#ifdef CONFIG_X86_64
6aa8b732
AK
2344 rdmsrl(MSR_FS_BASE, a);
2345 vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2346 rdmsrl(MSR_GS_BASE, a);
2347 vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2348#else
2349 vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2350 vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2351#endif
2352
2353 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
2354
d6e88aec 2355 kvm_get_idt(&dt);
6aa8b732
AK
2356 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
2357
d77c26fc 2358 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
cd2276a7 2359 vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2cc51560
ED
2360 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2361 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2362 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
6aa8b732
AK
2363
2364 rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2365 vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2366 rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2367 vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */
2368 rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2369 vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */
2370
468d472f
SY
2371 if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2372 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2373 host_pat = msr_low | ((u64) msr_high << 32);
2374 vmcs_write64(HOST_IA32_PAT, host_pat);
2375 }
2376 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2377 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2378 host_pat = msr_low | ((u64) msr_high << 32);
2379 /* Write the default value, following the host PAT */
2380 vmcs_write64(GUEST_IA32_PAT, host_pat);
2381 /* Keep arch.pat in sync with GUEST_IA32_PAT */
2382 vmx->vcpu.arch.pat = host_pat;
2383 }
2384
6aa8b732
AK
2385 for (i = 0; i < NR_VMX_MSR; ++i) {
2386 u32 index = vmx_msr_index[i];
2387 u32 data_low, data_high;
2388 u64 data;
a2fa3e9f 2389 int j = vmx->nmsrs;
6aa8b732
AK
2390
2391 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2392 continue;
432bd6cb
AK
2393 if (wrmsr_safe(index, data_low, data_high) < 0)
2394 continue;
6aa8b732 2395 data = data_low | ((u64)data_high << 32);
26bb0981
AK
2396 vmx->guest_msrs[j].index = i;
2397 vmx->guest_msrs[j].data = 0;
a2fa3e9f 2398 ++vmx->nmsrs;
6aa8b732 2399 }
6aa8b732 2400
1c3d14fe 2401 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
6aa8b732
AK
2402
2403 /* 22.2.1, 20.8.1 */
1c3d14fe
YS
2404 vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2405
e00c8cf2
AK
2406 vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2407 vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2408
53f658b3
MT
2409 tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2410 rdtscll(tsc_this);
2411 if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2412 tsc_base = tsc_this;
2413
2414 guest_write_tsc(0, tsc_base);
f78e0e2e 2415
e00c8cf2
AK
2416 return 0;
2417}
2418
b7ebfb05
SY
2419static int init_rmode(struct kvm *kvm)
2420{
2421 if (!init_rmode_tss(kvm))
2422 return 0;
2423 if (!init_rmode_identity_map(kvm))
2424 return 0;
2425 return 1;
2426}
2427
e00c8cf2
AK
2428static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2429{
2430 struct vcpu_vmx *vmx = to_vmx(vcpu);
2431 u64 msr;
2432 int ret;
2433
5fdbf976 2434 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
3200f405 2435 down_read(&vcpu->kvm->slots_lock);
b7ebfb05 2436 if (!init_rmode(vmx->vcpu.kvm)) {
e00c8cf2
AK
2437 ret = -ENOMEM;
2438 goto out;
2439 }
2440
7ffd92c5 2441 vmx->rmode.vm86_active = 0;
e00c8cf2 2442
3b86cd99
JK
2443 vmx->soft_vnmi_blocked = 0;
2444
ad312c7c 2445 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2d3ad1f4 2446 kvm_set_cr8(&vmx->vcpu, 0);
e00c8cf2 2447 msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
c5af89b6 2448 if (kvm_vcpu_is_bsp(&vmx->vcpu))
e00c8cf2
AK
2449 msr |= MSR_IA32_APICBASE_BSP;
2450 kvm_set_apic_base(&vmx->vcpu, msr);
2451
2452 fx_init(&vmx->vcpu);
2453
5706be0d 2454 seg_setup(VCPU_SREG_CS);
e00c8cf2
AK
2455 /*
2456 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2457 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
2458 */
c5af89b6 2459 if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
e00c8cf2
AK
2460 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2461 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2462 } else {
ad312c7c
ZX
2463 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2464 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
e00c8cf2 2465 }
e00c8cf2
AK
2466
2467 seg_setup(VCPU_SREG_DS);
2468 seg_setup(VCPU_SREG_ES);
2469 seg_setup(VCPU_SREG_FS);
2470 seg_setup(VCPU_SREG_GS);
2471 seg_setup(VCPU_SREG_SS);
2472
2473 vmcs_write16(GUEST_TR_SELECTOR, 0);
2474 vmcs_writel(GUEST_TR_BASE, 0);
2475 vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2476 vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2477
2478 vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2479 vmcs_writel(GUEST_LDTR_BASE, 0);
2480 vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2481 vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
2482
2483 vmcs_write32(GUEST_SYSENTER_CS, 0);
2484 vmcs_writel(GUEST_SYSENTER_ESP, 0);
2485 vmcs_writel(GUEST_SYSENTER_EIP, 0);
2486
2487 vmcs_writel(GUEST_RFLAGS, 0x02);
c5af89b6 2488 if (kvm_vcpu_is_bsp(&vmx->vcpu))
5fdbf976 2489 kvm_rip_write(vcpu, 0xfff0);
e00c8cf2 2490 else
5fdbf976
MT
2491 kvm_rip_write(vcpu, 0);
2492 kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
e00c8cf2 2493
e00c8cf2
AK
2494 vmcs_writel(GUEST_DR7, 0x400);
2495
2496 vmcs_writel(GUEST_GDTR_BASE, 0);
2497 vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2498
2499 vmcs_writel(GUEST_IDTR_BASE, 0);
2500 vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2501
2502 vmcs_write32(GUEST_ACTIVITY_STATE, 0);
2503 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2504 vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2505
e00c8cf2
AK
2506 /* Special registers */
2507 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2508
2509 setup_msrs(vmx);
2510
6aa8b732
AK
2511 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
2512
f78e0e2e
SY
2513 if (cpu_has_vmx_tpr_shadow()) {
2514 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2515 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2516 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
ad312c7c 2517 page_to_phys(vmx->vcpu.arch.apic->regs_page));
f78e0e2e
SY
2518 vmcs_write32(TPR_THRESHOLD, 0);
2519 }
2520
2521 if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2522 vmcs_write64(APIC_ACCESS_ADDR,
bfc6d222 2523 page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
6aa8b732 2524
2384d2b3
SY
2525 if (vmx->vpid != 0)
2526 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2527
fa40052c 2528 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
ad312c7c 2529 vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
8b9cf98c 2530 vmx_set_cr4(&vmx->vcpu, 0);
8b9cf98c 2531 vmx_set_efer(&vmx->vcpu, 0);
8b9cf98c
RR
2532 vmx_fpu_activate(&vmx->vcpu);
2533 update_exception_bitmap(&vmx->vcpu);
6aa8b732 2534
2384d2b3
SY
2535 vpid_sync_vcpu_all(vmx);
2536
3200f405 2537 ret = 0;
6aa8b732 2538
a89a8fb9
MG
2539 /* HACK: Don't enable emulation on guest boot/reset */
2540 vmx->emulation_required = 0;
2541
6aa8b732 2542out:
3200f405 2543 up_read(&vcpu->kvm->slots_lock);
6aa8b732
AK
2544 return ret;
2545}
2546
3b86cd99
JK
2547static void enable_irq_window(struct kvm_vcpu *vcpu)
2548{
2549 u32 cpu_based_vm_exec_control;
2550
2551 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2552 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2553 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2554}
2555
2556static void enable_nmi_window(struct kvm_vcpu *vcpu)
2557{
2558 u32 cpu_based_vm_exec_control;
2559
2560 if (!cpu_has_virtual_nmis()) {
2561 enable_irq_window(vcpu);
2562 return;
2563 }
2564
2565 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2566 cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2567 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2568}
2569
66fd3f7f 2570static void vmx_inject_irq(struct kvm_vcpu *vcpu)
85f455f7 2571{
9c8cba37 2572 struct vcpu_vmx *vmx = to_vmx(vcpu);
66fd3f7f
GN
2573 uint32_t intr;
2574 int irq = vcpu->arch.interrupt.nr;
9c8cba37 2575
229456fc 2576 trace_kvm_inj_virq(irq);
2714d1d3 2577
fa89a817 2578 ++vcpu->stat.irq_injections;
7ffd92c5 2579 if (vmx->rmode.vm86_active) {
9c8cba37
AK
2580 vmx->rmode.irq.pending = true;
2581 vmx->rmode.irq.vector = irq;
5fdbf976 2582 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
ae0bb3e0
GN
2583 if (vcpu->arch.interrupt.soft)
2584 vmx->rmode.irq.rip +=
2585 vmx->vcpu.arch.event_exit_inst_len;
9c5623e3
AK
2586 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2587 irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2588 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
5fdbf976 2589 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
85f455f7
ED
2590 return;
2591 }
66fd3f7f
GN
2592 intr = irq | INTR_INFO_VALID_MASK;
2593 if (vcpu->arch.interrupt.soft) {
2594 intr |= INTR_TYPE_SOFT_INTR;
2595 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2596 vmx->vcpu.arch.event_exit_inst_len);
2597 } else
2598 intr |= INTR_TYPE_EXT_INTR;
2599 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
85f455f7
ED
2600}
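/*
 * Note (illustrative): for vm86-mode guests the interrupt is queued
 * as a soft interrupt of instruction length 1 with RIP wound back
 * one byte, so the VM entry injects it as if the guest had executed
 * INT n and real-mode IVT semantics are preserved; the failure path
 * is patched up in fixup_rmode_irq() below.
 */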
2601
f08864b4
SY
2602static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2603{
66a5a347
JK
2604 struct vcpu_vmx *vmx = to_vmx(vcpu);
2605
3b86cd99
JK
2606 if (!cpu_has_virtual_nmis()) {
2607 /*
2608 * Tracking the NMI-blocked state in software is built upon
2609 * finding the next open IRQ window. This, in turn, depends on
2610 * well-behaving guests: They have to keep IRQs disabled at
2611 * least as long as the NMI handler runs. Otherwise we may
2612 * cause NMI nesting, maybe breaking the guest. But as this is
2613 * highly unlikely, we can live with the residual risk.
2614 */
2615 vmx->soft_vnmi_blocked = 1;
2616 vmx->vnmi_blocked_time = 0;
2617 }
2618
487b391d 2619 ++vcpu->stat.nmi_injections;
7ffd92c5 2620 if (vmx->rmode.vm86_active) {
66a5a347
JK
2621 vmx->rmode.irq.pending = true;
2622 vmx->rmode.irq.vector = NMI_VECTOR;
2623 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2624 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2625 NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2626 INTR_INFO_VALID_MASK);
2627 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2628 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2629 return;
2630 }
f08864b4
SY
2631 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2632 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
f08864b4
SY
2633}
2634
c4282df9 2635static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
33f089ca 2636{
3b86cd99 2637 if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
c4282df9 2638 return 0;
33f089ca 2639
c4282df9
GN
2640 return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2641 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
2642 GUEST_INTR_STATE_NMI));
33f089ca
JK
2643}
2644
78646121
GN
2645static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
2646{
c4282df9
GN
2647 return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2648 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2649 (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
78646121
GN
2650}
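/*
 * Note (illustrative): in the interruptibility-state field, bit 0
 * (blocking by STI) and bit 1 (blocking by MOV SS) suppress
 * interrupts for one instruction and bit 3 reports virtual-NMI
 * blocking; an event is deliverable only with RFLAGS.IF set and
 * none of the relevant blocking bits active, which is exactly what
 * these two predicates test.
 */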
2651
cbc94022
IE
2652static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2653{
2654 int ret;
2655 struct kvm_userspace_memory_region tss_mem = {
6fe63979 2656 .slot = TSS_PRIVATE_MEMSLOT,
cbc94022
IE
2657 .guest_phys_addr = addr,
2658 .memory_size = PAGE_SIZE * 3,
2659 .flags = 0,
2660 };
2661
2662 ret = kvm_set_memory_region(kvm, &tss_mem, 0);
2663 if (ret)
2664 return ret;
bfc6d222 2665 kvm->arch.tss_addr = addr;
cbc94022
IE
2666 return 0;
2667}
2668
6aa8b732
AK
2669static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2670 int vec, u32 err_code)
2671{
b3f37707
NK
2672 /*
2673 * Instruction with address size override prefix opcode 0x67
2674 * Cause the #SS fault with 0 error code in VM86 mode.
2675 */
2676 if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
851ba692 2677 if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
6aa8b732 2678 return 1;
77ab6db0
JK
2679 /*
2680 * Forward all other exceptions that are valid in real mode.
2681 * FIXME: Breaks guest debugging in real mode, needs to be fixed with
2682 * the required debugging infrastructure rework.
2683 */
2684 switch (vec) {
77ab6db0 2685 case DB_VECTOR:
d0bfb940
JK
2686 if (vcpu->guest_debug &
2687 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2688 return 0;
2689 kvm_queue_exception(vcpu, vec);
2690 return 1;
77ab6db0 2691 case BP_VECTOR:
d0bfb940
JK
2692 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2693 return 0;
2694 /* fall through */
2695 case DE_VECTOR:
77ab6db0
JK
2696 case OF_VECTOR:
2697 case BR_VECTOR:
2698 case UD_VECTOR:
2699 case DF_VECTOR:
2700 case SS_VECTOR:
2701 case GP_VECTOR:
2702 case MF_VECTOR:
2703 kvm_queue_exception(vcpu, vec);
2704 return 1;
2705 }
6aa8b732
AK
2706 return 0;
2707}
2708
a0861c02
AK
2709/*
2710 * Trigger machine check on the host. We assume all the MSRs are already set up
2711 * by the CPU and that we still run on the same CPU as the MCE occurred on.
2712 * We pass a fake environment to the machine check handler because we want
2713 * the guest to be always treated like user space, no matter what context
2714 * it used internally.
2715 */
2716static void kvm_machine_check(void)
2717{
2718#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
2719 struct pt_regs regs = {
2720 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
2721 .flags = X86_EFLAGS_IF,
2722 };
2723
2724 do_machine_check(&regs, 0);
2725#endif
2726}
2727
851ba692 2728static int handle_machine_check(struct kvm_vcpu *vcpu)
a0861c02
AK
2729{
2730 /* already handled by vcpu_run */
2731 return 1;
2732}
2733
851ba692 2734static int handle_exception(struct kvm_vcpu *vcpu)
6aa8b732 2735{
1155f76a 2736 struct vcpu_vmx *vmx = to_vmx(vcpu);
851ba692 2737 struct kvm_run *kvm_run = vcpu->run;
d0bfb940 2738 u32 intr_info, ex_no, error_code;
42dbaa5a 2739 unsigned long cr2, rip, dr6;
6aa8b732
AK
2740 u32 vect_info;
2741 enum emulation_result er;
2742
1155f76a 2743 vect_info = vmx->idt_vectoring_info;
6aa8b732
AK
2744 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2745
a0861c02 2746 if (is_machine_check(intr_info))
851ba692 2747 return handle_machine_check(vcpu);
a0861c02 2748
6aa8b732 2749 if ((vect_info & VECTORING_INFO_VALID_MASK) &&
d77c26fc 2750 !is_page_fault(intr_info))
6aa8b732 2751 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
b8688d51 2752 "intr info 0x%x\n", __func__, vect_info, intr_info);
6aa8b732 2753
e4a41889 2754 if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
1b6269db 2755 return 1; /* already handled by vmx_vcpu_run() */
2ab455cc
AL
2756
2757 if (is_no_device(intr_info)) {
5fd86fcf 2758 vmx_fpu_activate(vcpu);
2ab455cc
AL
2759 return 1;
2760 }
2761
7aa81cc0 2762 if (is_invalid_opcode(intr_info)) {
851ba692 2763 er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
7aa81cc0 2764 if (er != EMULATE_DONE)
7ee5d940 2765 kvm_queue_exception(vcpu, UD_VECTOR);
7aa81cc0
AL
2766 return 1;
2767 }
2768
6aa8b732 2769 error_code = 0;
5fdbf976 2770 rip = kvm_rip_read(vcpu);
2e11384c 2771 if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
6aa8b732
AK
2772 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
2773 if (is_page_fault(intr_info)) {
1439442c 2774 /* EPT won't cause page fault directly */
089d034e 2775 if (enable_ept)
1439442c 2776 BUG();
6aa8b732 2777 cr2 = vmcs_readl(EXIT_QUALIFICATION);
229456fc
MT
2778 trace_kvm_page_fault(cr2, error_code);
2779
3298b75c 2780 if (kvm_event_needs_reinjection(vcpu))
577bdc49 2781 kvm_mmu_unprotect_page_virt(vcpu, cr2);
3067714c 2782 return kvm_mmu_page_fault(vcpu, cr2, error_code);
6aa8b732
AK
2783 }
2784
7ffd92c5 2785 if (vmx->rmode.vm86_active &&
6aa8b732 2786 handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
72d6e5a0 2787 error_code)) {
ad312c7c
ZX
2788 if (vcpu->arch.halt_request) {
2789 vcpu->arch.halt_request = 0;
72d6e5a0
AK
2790 return kvm_emulate_halt(vcpu);
2791 }
6aa8b732 2792 return 1;
72d6e5a0 2793 }
6aa8b732 2794
d0bfb940 2795 ex_no = intr_info & INTR_INFO_VECTOR_MASK;
42dbaa5a
JK
2796 switch (ex_no) {
2797 case DB_VECTOR:
2798 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2799 if (!(vcpu->guest_debug &
2800 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2801 vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2802 kvm_queue_exception(vcpu, DB_VECTOR);
2803 return 1;
2804 }
2805 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2806 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2807 /* fall through */
2808 case BP_VECTOR:
6aa8b732 2809 kvm_run->exit_reason = KVM_EXIT_DEBUG;
d0bfb940
JK
2810 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2811 kvm_run->debug.arch.exception = ex_no;
42dbaa5a
JK
2812 break;
2813 default:
d0bfb940
JK
2814 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2815 kvm_run->ex.exception = ex_no;
2816 kvm_run->ex.error_code = error_code;
42dbaa5a 2817 break;
6aa8b732 2818 }
6aa8b732
AK
2819 return 0;
2820}
2821
851ba692 2822static int handle_external_interrupt(struct kvm_vcpu *vcpu)
6aa8b732 2823{
1165f5fe 2824 ++vcpu->stat.irq_exits;
6aa8b732
AK
2825 return 1;
2826}
2827
851ba692 2828static int handle_triple_fault(struct kvm_vcpu *vcpu)
988ad74f 2829{
851ba692 2830 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
988ad74f
AK
2831 return 0;
2832}
6aa8b732 2833
851ba692 2834static int handle_io(struct kvm_vcpu *vcpu)
6aa8b732 2835{
bfdaab09 2836 unsigned long exit_qualification;
34c33d16 2837 int size, in, string;
039576c0 2838 unsigned port;
6aa8b732 2839
1165f5fe 2840 ++vcpu->stat.io_exits;
bfdaab09 2841 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
039576c0 2842 string = (exit_qualification & 16) != 0;
e70669ab
LV
2843
2844 if (string) {
851ba692 2845 if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
e70669ab
LV
2846 return 0;
2847 return 1;
2848 }
2849
2850 size = (exit_qualification & 7) + 1;
2851 in = (exit_qualification & 8) != 0;
039576c0 2852 port = exit_qualification >> 16;
e70669ab 2853
e93f36bc 2854 skip_emulated_instruction(vcpu);
851ba692 2855 return kvm_emulate_pio(vcpu, in, size, port);
6aa8b732
AK
2856}
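/*
 * Decode note (illustrative): in the I/O exit qualification read
 * above, bits 2:0 hold the access size minus one, bit 3 the
 * direction (1 = IN), bit 4 the string-instruction flag and bits
 * 31:16 the port; e.g. "in al, 0x71" yields size 1, in 1, port 0x71.
 */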
2857
102d8325
IM
2858static void
2859vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2860{
2861 /*
2862 * Patch in the VMCALL instruction:
2863 */
2864 hypercall[0] = 0x0f;
2865 hypercall[1] = 0x01;
2866 hypercall[2] = 0xc1;
102d8325
IM
2867}
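/*
 * Note (illustrative): 0f 01 c1 is the three-byte encoding of
 * VMCALL. The svm backend patches in VMMCALL (0f 01 d9) instead,
 * so a guest's hypercall stub works unchanged on either vendor.
 */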
2868
851ba692 2869static int handle_cr(struct kvm_vcpu *vcpu)
6aa8b732 2870{
229456fc 2871 unsigned long exit_qualification, val;
6aa8b732
AK
2872 int cr;
2873 int reg;
2874
bfdaab09 2875 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
6aa8b732
AK
2876 cr = exit_qualification & 15;
2877 reg = (exit_qualification >> 8) & 15;
2878 switch ((exit_qualification >> 4) & 3) {
2879 case 0: /* mov to cr */
229456fc
MT
2880 val = kvm_register_read(vcpu, reg);
2881 trace_kvm_cr_write(cr, val);
6aa8b732
AK
2882 switch (cr) {
2883 case 0:
229456fc 2884 kvm_set_cr0(vcpu, val);
6aa8b732
AK
2885 skip_emulated_instruction(vcpu);
2886 return 1;
2887 case 3:
229456fc 2888 kvm_set_cr3(vcpu, val);
6aa8b732
AK
2889 skip_emulated_instruction(vcpu);
2890 return 1;
2891 case 4:
229456fc 2892 kvm_set_cr4(vcpu, val);
6aa8b732
AK
2893 skip_emulated_instruction(vcpu);
2894 return 1;
0a5fff19
GN
2895 case 8: {
2896 u8 cr8_prev = kvm_get_cr8(vcpu);
2897 u8 cr8 = kvm_register_read(vcpu, reg);
2898 kvm_set_cr8(vcpu, cr8);
2899 skip_emulated_instruction(vcpu);
2900 if (irqchip_in_kernel(vcpu->kvm))
2901 return 1;
2902 if (cr8_prev <= cr8)
2903 return 1;
851ba692 2904 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
0a5fff19
GN
2905 return 0;
2906 }
6aa8b732
AK
2907 };
2908 break;
25c4c276 2909 case 2: /* clts */
5fd86fcf 2910 vmx_fpu_deactivate(vcpu);
ad312c7c
ZX
2911 vcpu->arch.cr0 &= ~X86_CR0_TS;
2912 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
5fd86fcf 2913 vmx_fpu_activate(vcpu);
25c4c276
AL
2914 skip_emulated_instruction(vcpu);
2915 return 1;
6aa8b732
AK
2916 case 1: /*mov from cr*/
2917 switch (cr) {
2918 case 3:
5fdbf976 2919 kvm_register_write(vcpu, reg, vcpu->arch.cr3);
229456fc 2920 trace_kvm_cr_read(cr, vcpu->arch.cr3);
6aa8b732
AK
2921 skip_emulated_instruction(vcpu);
2922 return 1;
2923 case 8:
229456fc
MT
2924 val = kvm_get_cr8(vcpu);
2925 kvm_register_write(vcpu, reg, val);
2926 trace_kvm_cr_read(cr, val);
6aa8b732
AK
2927 skip_emulated_instruction(vcpu);
2928 return 1;
2929 }
2930 break;
2931 case 3: /* lmsw */
2d3ad1f4 2932 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
6aa8b732
AK
2933
2934 skip_emulated_instruction(vcpu);
2935 return 1;
2936 default:
2937 break;
2938 }
851ba692 2939 vcpu->run->exit_reason = 0;
f0242478 2940 pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
6aa8b732
AK
2941 (int)(exit_qualification >> 4) & 3, cr);
2942 return 0;
2943}
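/*
 * Decode note (illustrative): in the CR-access exit qualification,
 * bits 3:0 give the control register number, bits 5:4 the access
 * type (0 = MOV to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits
 * 11:8 the general-purpose register and bits 31:16 the LMSW source
 * data, matching the field extraction above.
 */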
2944
851ba692 2945static int handle_dr(struct kvm_vcpu *vcpu)
6aa8b732 2946{
bfdaab09 2947 unsigned long exit_qualification;
6aa8b732
AK
2948 unsigned long val;
2949 int dr, reg;
2950
0a79b009
AK
2951 if (!kvm_require_cpl(vcpu, 0))
2952 return 1;
42dbaa5a
JK
2953 dr = vmcs_readl(GUEST_DR7);
2954 if (dr & DR7_GD) {
2955 /*
2956 * As the vm-exit takes precedence over the debug trap, we
2957 * need to emulate the latter, either for the host or the
2958 * guest debugging itself.
2959 */
2960 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
851ba692
AK
2961 vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
2962 vcpu->run->debug.arch.dr7 = dr;
2963 vcpu->run->debug.arch.pc =
42dbaa5a
JK
2964 vmcs_readl(GUEST_CS_BASE) +
2965 vmcs_readl(GUEST_RIP);
851ba692
AK
2966 vcpu->run->debug.arch.exception = DB_VECTOR;
2967 vcpu->run->exit_reason = KVM_EXIT_DEBUG;
42dbaa5a
JK
2968 return 0;
2969 } else {
2970 vcpu->arch.dr7 &= ~DR7_GD;
2971 vcpu->arch.dr6 |= DR6_BD;
2972 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2973 kvm_queue_exception(vcpu, DB_VECTOR);
2974 return 1;
2975 }
2976 }
2977
bfdaab09 2978 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
42dbaa5a
JK
2979 dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
2980 reg = DEBUG_REG_ACCESS_REG(exit_qualification);
2981 if (exit_qualification & TYPE_MOV_FROM_DR) {
6aa8b732 2982 switch (dr) {
42dbaa5a
JK
2983 case 0 ... 3:
2984 val = vcpu->arch.db[dr];
2985 break;
6aa8b732 2986 case 6:
42dbaa5a 2987 val = vcpu->arch.dr6;
6aa8b732
AK
2988 break;
2989 case 7:
42dbaa5a 2990 val = vcpu->arch.dr7;
6aa8b732
AK
2991 break;
2992 default:
2993 val = 0;
2994 }
5fdbf976 2995 kvm_register_write(vcpu, reg, val);
6aa8b732 2996 } else {
42dbaa5a
JK
2997 val = vcpu->arch.regs[reg];
2998 switch (dr) {
2999 case 0 ... 3:
3000 vcpu->arch.db[dr] = val;
3001 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
3002 vcpu->arch.eff_db[dr] = val;
3003 break;
3004 case 4 ... 5:
3005 if (vcpu->arch.cr4 & X86_CR4_DE)
3006 kvm_queue_exception(vcpu, UD_VECTOR);
3007 break;
3008 case 6:
3009 if (val & 0xffffffff00000000ULL) {
3010 kvm_queue_exception(vcpu, GP_VECTOR);
3011 break;
3012 }
3013 vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
3014 break;
3015 case 7:
3016 if (val & 0xffffffff00000000ULL) {
3017 kvm_queue_exception(vcpu, GP_VECTOR);
3018 break;
3019 }
3020 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
3021 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
3022 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3023 vcpu->arch.switch_db_regs =
3024 (val & DR7_BP_EN_MASK);
3025 }
3026 break;
3027 }
6aa8b732 3028 }
6aa8b732
AK
3029 skip_emulated_instruction(vcpu);
3030 return 1;
3031}
3032
851ba692 3033static int handle_cpuid(struct kvm_vcpu *vcpu)
6aa8b732 3034{
06465c5a
AK
3035 kvm_emulate_cpuid(vcpu);
3036 return 1;
6aa8b732
AK
3037}
3038
851ba692 3039static int handle_rdmsr(struct kvm_vcpu *vcpu)
6aa8b732 3040{
ad312c7c 3041 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
6aa8b732
AK
3042 u64 data;
3043
3044 if (vmx_get_msr(vcpu, ecx, &data)) {
c1a5d4f9 3045 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
3046 return 1;
3047 }
3048
229456fc 3049 trace_kvm_msr_read(ecx, data);
2714d1d3 3050
6aa8b732 3051 /* FIXME: handling of bits 32:63 of rax, rdx */
ad312c7c
ZX
3052 vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
3053 vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
6aa8b732
AK
3054 skip_emulated_instruction(vcpu);
3055 return 1;
3056}
3057
851ba692 3058static int handle_wrmsr(struct kvm_vcpu *vcpu)
6aa8b732 3059{
ad312c7c
ZX
3060 u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3061 u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
3062 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
6aa8b732 3063
229456fc 3064 trace_kvm_msr_write(ecx, data);
2714d1d3 3065
6aa8b732 3066 if (vmx_set_msr(vcpu, ecx, data) != 0) {
c1a5d4f9 3067 kvm_inject_gp(vcpu, 0);
6aa8b732
AK
3068 return 1;
3069 }
3070
3071 skip_emulated_instruction(vcpu);
3072 return 1;
3073}
3074
851ba692 3075static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
6e5d865c
YS
3076{
3077 return 1;
3078}
3079
851ba692 3080static int handle_interrupt_window(struct kvm_vcpu *vcpu)
6aa8b732 3081{
85f455f7
ED
3082 u32 cpu_based_vm_exec_control;
3083
3084 /* clear pending irq */
3085 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3086 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3087 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2714d1d3 3088
a26bf12a 3089 ++vcpu->stat.irq_window_exits;
2714d1d3 3090
c1150d8c
DL
3091 /*
3092 * If userspace is waiting to inject interrupts, exit as soon as
3093 * possible.
3094 */
8061823a 3095 if (!irqchip_in_kernel(vcpu->kvm) &&
851ba692 3096 vcpu->run->request_interrupt_window &&
8061823a 3097 !kvm_cpu_has_interrupt(vcpu)) {
851ba692 3098 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
c1150d8c
DL
3099 return 0;
3100 }
6aa8b732
AK
3101 return 1;
3102}
3103
851ba692 3104static int handle_halt(struct kvm_vcpu *vcpu)
6aa8b732
AK
3105{
3106 skip_emulated_instruction(vcpu);
d3bef15f 3107 return kvm_emulate_halt(vcpu);
6aa8b732
AK
3108}
3109
851ba692 3110static int handle_vmcall(struct kvm_vcpu *vcpu)
c21415e8 3111{
510043da 3112 skip_emulated_instruction(vcpu);
7aa81cc0
AL
3113 kvm_emulate_hypercall(vcpu);
3114 return 1;
c21415e8
IM
3115}
3116
851ba692 3117static int handle_vmx_insn(struct kvm_vcpu *vcpu)
e3c7cb6a
AK
3118{
3119 kvm_queue_exception(vcpu, UD_VECTOR);
3120 return 1;
3121}
3122
851ba692 3123static int handle_invlpg(struct kvm_vcpu *vcpu)
a7052897 3124{
f9c617f6 3125 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
a7052897
MT
3126
3127 kvm_mmu_invlpg(vcpu, exit_qualification);
3128 skip_emulated_instruction(vcpu);
3129 return 1;
3130}
3131
851ba692 3132static int handle_wbinvd(struct kvm_vcpu *vcpu)
e5edaa01
ED
3133{
3134 skip_emulated_instruction(vcpu);
3135 /* TODO: Add support for VT-d/pass-through device */
3136 return 1;
3137}
3138
851ba692 3139static int handle_apic_access(struct kvm_vcpu *vcpu)
f78e0e2e 3140{
f9c617f6 3141 unsigned long exit_qualification;
f78e0e2e
SY
3142 enum emulation_result er;
3143 unsigned long offset;
3144
f9c617f6 3145 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
f78e0e2e
SY
3146 offset = exit_qualification & 0xffful;
3147
851ba692 3148 er = emulate_instruction(vcpu, 0, 0, 0);
f78e0e2e
SY
3149
3150 if (er != EMULATE_DONE) {
3151 printk(KERN_ERR
3152 "Fail to handle apic access vmexit! Offset is 0x%lx\n",
3153 offset);
7f582ab6 3154 return -ENOEXEC;
f78e0e2e
SY
3155 }
3156 return 1;
3157}
3158
851ba692 3159static int handle_task_switch(struct kvm_vcpu *vcpu)
37817f29 3160{
60637aac 3161 struct vcpu_vmx *vmx = to_vmx(vcpu);
37817f29
IE
3162 unsigned long exit_qualification;
3163 u16 tss_selector;
64a7ec06
GN
3164 int reason, type, idt_v;
3165
3166 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
3167 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
37817f29
IE
3168
3169 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3170
3171 reason = (u32)exit_qualification >> 30;
64a7ec06
GN
3172 if (reason == TASK_SWITCH_GATE && idt_v) {
3173 switch (type) {
3174 case INTR_TYPE_NMI_INTR:
3175 vcpu->arch.nmi_injected = false;
3176 if (cpu_has_virtual_nmis())
3177 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3178 GUEST_INTR_STATE_NMI);
3179 break;
3180 case INTR_TYPE_EXT_INTR:
66fd3f7f 3181 case INTR_TYPE_SOFT_INTR:
64a7ec06
GN
3182 kvm_clear_interrupt_queue(vcpu);
3183 break;
3184 case INTR_TYPE_HARD_EXCEPTION:
3185 case INTR_TYPE_SOFT_EXCEPTION:
3186 kvm_clear_exception_queue(vcpu);
3187 break;
3188 default:
3189 break;
3190 }
60637aac 3191 }
37817f29
IE
3192 tss_selector = exit_qualification;
3193
64a7ec06
GN
3194 if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
3195 type != INTR_TYPE_EXT_INTR &&
3196 type != INTR_TYPE_NMI_INTR))
3197 skip_emulated_instruction(vcpu);
3198
42dbaa5a
JK
3199 if (!kvm_task_switch(vcpu, tss_selector, reason))
3200 return 0;
3201
3202 /* clear all local breakpoint enable flags */
3203 vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
3204
3205 /*
3206 * TODO: What about debug traps on tss switch?
3207 * Are we supposed to inject them and update dr6?
3208 */
3209
3210 return 1;
37817f29
IE
3211}
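/*
 * Decode note (illustrative): for a task-switch exit the low 16
 * bits of the exit qualification carry the target TSS selector and
 * bits 31:30 the cause (0 = CALL, 1 = IRET, 2 = JMP, 3 = task gate
 * in the IDT), which is the "reason" distinguished above.
 */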
3212
851ba692 3213static int handle_ept_violation(struct kvm_vcpu *vcpu)
1439442c 3214{
f9c617f6 3215 unsigned long exit_qualification;
1439442c 3216 gpa_t gpa;
1439442c 3217 int gla_validity;
1439442c 3218
f9c617f6 3219 exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
1439442c
SY
3220
3221 if (exit_qualification & (1 << 6)) {
3222 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
7f582ab6 3223 return -EINVAL;
1439442c
SY
3224 }
3225
3226 gla_validity = (exit_qualification >> 7) & 0x3;
3227 if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3228 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3229 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3230 (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
f9c617f6 3231 vmcs_readl(GUEST_LINEAR_ADDRESS));
1439442c
SY
3232 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3233 (long unsigned int)exit_qualification);
851ba692
AK
3234 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3235 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
596ae895 3236 return 0;
1439442c
SY
3237 }
3238
3239 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
229456fc 3240 trace_kvm_page_fault(gpa, exit_qualification);
49cd7d22 3241 return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
1439442c
SY
3242}
3243
68f89400
MT
3244static u64 ept_rsvd_mask(u64 spte, int level)
3245{
3246 int i;
3247 u64 mask = 0;
3248
3249 for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
3250 mask |= (1ULL << i);
3251
3252 if (level > 2)
3253 /* bits 7:3 reserved */
3254 mask |= 0xf8;
3255 else if (level == 2) {
3256 if (spte & (1ULL << 7))
3257 /* 2MB ref, bits 20:12 reserved */
3258 mask |= 0x1ff000;
3259 else
3260 /* bits 6:3 reserved */
3261 mask |= 0x78;
3262 }
3263
3264 return mask;
3265}
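/*
 * Summary (illustrative): bits above the CPU's physical-address
 * width, up to bit 51, are reserved in every EPT entry; level 4/3
 * entries additionally reserve bits 7:3, and a level-2 entry
 * reserves bits 20:12 when bit 7 selects a 2 MB page, or bits 6:3
 * when it points to a page table.
 */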
3266
3267static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3268 int level)
3269{
3270 printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3271
3272 /* 010b (write-only) */
3273 WARN_ON((spte & 0x7) == 0x2);
3274
3275 /* 110b (write/execute) */
3276 WARN_ON((spte & 0x7) == 0x6);
3277
3278 /* 100b (execute-only) and value not supported by logical processor */
3279 if (!cpu_has_vmx_ept_execute_only())
3280 WARN_ON((spte & 0x7) == 0x4);
3281
3282 /* not 000b */
3283 if ((spte & 0x7)) {
3284 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3285
3286 if (rsvd_bits != 0) {
3287 printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3288 __func__, rsvd_bits);
3289 WARN_ON(1);
3290 }
3291
3292 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3293 u64 ept_mem_type = (spte & 0x38) >> 3;
3294
3295 if (ept_mem_type == 2 || ept_mem_type == 3 ||
3296 ept_mem_type == 7) {
3297 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3298 __func__, ept_mem_type);
3299 WARN_ON(1);
3300 }
3301 }
3302 }
3303}
3304
851ba692 3305static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
68f89400
MT
3306{
3307 u64 sptes[4];
3308 int nr_sptes, i;
3309 gpa_t gpa;
3310
3311 gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3312
3313 printk(KERN_ERR "EPT: Misconfiguration.\n");
3314 printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3315
3316 nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3317
3318 for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3319 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3320
851ba692
AK
3321 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3322 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
68f89400
MT
3323
3324 return 0;
3325}
3326
851ba692 3327static int handle_nmi_window(struct kvm_vcpu *vcpu)
f08864b4
SY
3328{
3329 u32 cpu_based_vm_exec_control;
3330
3331 /* clear pending NMI */
3332 cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3333 cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3334 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3335 ++vcpu->stat.nmi_window_exits;
3336
3337 return 1;
3338}
3339
80ced186 3340static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
ea953ef0 3341{
8b3079a5
AK
3342 struct vcpu_vmx *vmx = to_vmx(vcpu);
3343 enum emulation_result err = EMULATE_DONE;
80ced186 3344 int ret = 1;
ea953ef0
MG
3345
3346 while (!guest_state_valid(vcpu)) {
851ba692 3347 err = emulate_instruction(vcpu, 0, 0, 0);
ea953ef0 3348
80ced186
MG
3349 if (err == EMULATE_DO_MMIO) {
3350 ret = 0;
3351 goto out;
3352 }
1d5a4d9b
GT
3353
3354 if (err != EMULATE_DONE) {
3355 kvm_report_emulation_failure(vcpu, "emulation failure");
80ced186
MG
3356 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3357 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3358 ret = 0;
3359 goto out;
ea953ef0
MG
3360 }
3361
3362 if (signal_pending(current))
80ced186 3363 goto out;
ea953ef0
MG
3364 if (need_resched())
3365 schedule();
3366 }
3367
80ced186
MG
3368 vmx->emulation_required = 0;
3369out:
3370 return ret;
ea953ef0
MG
3371}
3372
4b8d54f9
ZE
3373/*
3374 * Indicate a busy-waiting vcpu in spinlock. We do not enable plain PAUSE
3375 * exiting, so we only get here on CPUs with PAUSE-loop exiting.
3376 */
9fb41ba8 3377static int handle_pause(struct kvm_vcpu *vcpu)
4b8d54f9
ZE
3378{
3379 skip_emulated_instruction(vcpu);
3380 kvm_vcpu_on_spin(vcpu);
3381
3382 return 1;
3383}
3384
6aa8b732
AK
3385/*
3386 * The exit handlers return 1 if the exit was handled fully and guest execution
3387 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
3388 * to be done to userspace and return 0.
3389 */
851ba692 3390static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6aa8b732
AK
3391 [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
3392 [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
988ad74f 3393 [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
f08864b4 3394 [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
6aa8b732 3395 [EXIT_REASON_IO_INSTRUCTION] = handle_io,
6aa8b732
AK
3396 [EXIT_REASON_CR_ACCESS] = handle_cr,
3397 [EXIT_REASON_DR_ACCESS] = handle_dr,
3398 [EXIT_REASON_CPUID] = handle_cpuid,
3399 [EXIT_REASON_MSR_READ] = handle_rdmsr,
3400 [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
3401 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
3402 [EXIT_REASON_HLT] = handle_halt,
a7052897 3403 [EXIT_REASON_INVLPG] = handle_invlpg,
c21415e8 3404 [EXIT_REASON_VMCALL] = handle_vmcall,
e3c7cb6a
AK
3405 [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
3406 [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
3407 [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
3408 [EXIT_REASON_VMPTRST] = handle_vmx_insn,
3409 [EXIT_REASON_VMREAD] = handle_vmx_insn,
3410 [EXIT_REASON_VMRESUME] = handle_vmx_insn,
3411 [EXIT_REASON_VMWRITE] = handle_vmx_insn,
3412 [EXIT_REASON_VMOFF] = handle_vmx_insn,
3413 [EXIT_REASON_VMON] = handle_vmx_insn,
f78e0e2e
SY
3414 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
3415 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
e5edaa01 3416 [EXIT_REASON_WBINVD] = handle_wbinvd,
37817f29 3417 [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
a0861c02 3418 [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
68f89400
MT
3419 [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
3420 [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
4b8d54f9 3421 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
6aa8b732
AK
3422};
3423
3424static const int kvm_vmx_max_exit_handlers =
50a3485c 3425 ARRAY_SIZE(kvm_vmx_exit_handlers);
6aa8b732
AK
3426
3427/*
3428 * The guest has exited. See if we can fix it or if we need userspace
3429 * assistance.
3430 */
851ba692 3431static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6aa8b732 3432{
29bd8a78 3433 struct vcpu_vmx *vmx = to_vmx(vcpu);
a0861c02 3434 u32 exit_reason = vmx->exit_reason;
1155f76a 3435 u32 vectoring_info = vmx->idt_vectoring_info;
29bd8a78 3436
229456fc 3437 trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
2714d1d3 3438
80ced186
MG
3439 /* If guest state is invalid, start emulating */
3440 if (vmx->emulation_required && emulate_invalid_guest_state)
3441 return handle_invalid_guest_state(vcpu);
1d5a4d9b 3442
1439442c
SY
3443 /* Accesses to CR3 don't cause a VM exit in paging mode, so we need
3444 * to sync with the guest's real CR3. */
6de4f3ad 3445 if (enable_ept && is_paging(vcpu))
1439442c 3446 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
1439442c 3447
29bd8a78 3448 if (unlikely(vmx->fail)) {
851ba692
AK
3449 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3450 vcpu->run->fail_entry.hardware_entry_failure_reason
29bd8a78
AK
3451 = vmcs_read32(VM_INSTRUCTION_ERROR);
3452 return 0;
3453 }
6aa8b732 3454
d77c26fc 3455 if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
1439442c 3456 (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
60637aac
JK
3457 exit_reason != EXIT_REASON_EPT_VIOLATION &&
3458 exit_reason != EXIT_REASON_TASK_SWITCH))
3459 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3460 "(0x%x) and exit reason is 0x%x\n",
3461 __func__, vectoring_info, exit_reason);
3b86cd99
JK
3462
3463 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
c4282df9 3464 if (vmx_interrupt_allowed(vcpu)) {
3b86cd99 3465 vmx->soft_vnmi_blocked = 0;
3b86cd99 3466 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
4531220b 3467 vcpu->arch.nmi_pending) {
3b86cd99
JK
3468 /*
3469 * This CPU don't support us in finding the end of an
3470 * NMI-blocked window if the guest runs with IRQs
3471 * disabled. So we pull the trigger after 1 s of
3472 * futile waiting, but inform the user about this.
3473 */
3474 printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3475 "state on VCPU %d after 1 s timeout\n",
3476 __func__, vcpu->vcpu_id);
3477 vmx->soft_vnmi_blocked = 0;
3b86cd99 3478 }
3b86cd99
JK
3479 }
3480
6aa8b732
AK
3481 if (exit_reason < kvm_vmx_max_exit_handlers
3482 && kvm_vmx_exit_handlers[exit_reason])
851ba692 3483 return kvm_vmx_exit_handlers[exit_reason](vcpu);
6aa8b732 3484 else {
851ba692
AK
3485 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
3486 vcpu->run->hw.hardware_exit_reason = exit_reason;
6aa8b732
AK
3487 }
3488 return 0;
3489}
3490
95ba8273 3491static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6e5d865c 3492{
95ba8273 3493 if (irr == -1 || tpr < irr) {
6e5d865c
YS
3494 vmcs_write32(TPR_THRESHOLD, 0);
3495 return;
3496 }
3497
95ba8273 3498 vmcs_write32(TPR_THRESHOLD, irr);
6e5d865c
YS
3499}
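/*
 * Note (illustrative): tpr and irr are 4-bit priority classes in
 * CR8 format. Writing the pending interrupt's class into
 * TPR_THRESHOLD makes the CPU take a "TPR below threshold" exit as
 * soon as the guest lowers its task priority enough for that
 * interrupt to become deliverable, at which point kvm can inject it.
 */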

static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
	u32 exit_intr_info;
	u32 idt_vectoring_info = vmx->idt_vectoring_info;
	bool unblock_nmi;
	u8 vector;
	int type;
	bool idtv_info_valid;

	exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);

	/* Handle machine checks before interrupts are enabled */
	if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
	    || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
		&& is_machine_check(exit_intr_info)))
		kvm_machine_check();

	/* We need to handle NMIs before interrupts are enabled */
	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
	    (exit_intr_info & INTR_INFO_VALID_MASK))
		asm("int $2");

	idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;

	if (cpu_has_virtual_nmis()) {
		unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
		vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Re-set bit "block by NMI" before VM entry if the VM exit
		 * was caused by a guest IRET fault.
		 * SDM 3: 23.2.2 (September 2008)
		 * Bit 12 is undefined in any of the following cases:
		 *  If the VM exit sets the valid bit in the IDT-vectoring
		 *   information field.
		 *  If the VM exit is due to a double fault.
		 */
		if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
		    vector != DF_VECTOR && !idtv_info_valid)
			vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
				      GUEST_INTR_STATE_NMI);
	} else if (unlikely(vmx->soft_vnmi_blocked))
		vmx->vnmi_blocked_time +=
			ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));

	vmx->vcpu.arch.nmi_injected = false;
	kvm_clear_exception_queue(&vmx->vcpu);
	kvm_clear_interrupt_queue(&vmx->vcpu);

	if (!idtv_info_valid)
		return;

	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

	switch (type) {
	case INTR_TYPE_NMI_INTR:
		vmx->vcpu.arch.nmi_injected = true;
		/*
		 * SDM 3: 27.7.1.2 (September 2008)
		 * Clear bit "block by NMI" before VM entry if an NMI
		 * delivery faulted.
		 */
		vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
				GUEST_INTR_STATE_NMI);
		break;
	case INTR_TYPE_SOFT_EXCEPTION:
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		/* fall through */
	case INTR_TYPE_HARD_EXCEPTION:
		if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
			u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
			kvm_queue_exception_e(&vmx->vcpu, vector, err);
		} else
			kvm_queue_exception(&vmx->vcpu, vector);
		break;
	case INTR_TYPE_SOFT_INTR:
		vmx->vcpu.arch.event_exit_inst_len =
			vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
		/* fall through */
	case INTR_TYPE_EXT_INTR:
		kvm_queue_interrupt(&vmx->vcpu, vector,
				    type == INTR_TYPE_SOFT_INTR);
		break;
	default:
		break;
	}
}
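
/*
 * Illustrative sketch (not part of the original file): the layout of
 * the IDT-vectoring information field decoded above, per the SDM --
 * bits 7:0 vector, bits 10:8 event type, bit 11 "deliver error code",
 * bit 31 valid.  demo_decode_idt_vectoring() is hypothetical.
 */
#if 0
static void demo_decode_idt_vectoring(u32 info)
{
	bool valid  = info & VECTORING_INFO_VALID_MASK;        /* bit 31 */
	u8  vector  = info & VECTORING_INFO_VECTOR_MASK;       /* bits 7:0 */
	u32 type    = info & VECTORING_INFO_TYPE_MASK;         /* bits 10:8 */
	bool has_ec = info & VECTORING_INFO_DELIVER_CODE_MASK; /* bit 11 */

	(void)valid; (void)vector; (void)type; (void)has_ec;
}
#endif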

/*
 * Failure to inject an interrupt should give us the information
 * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
 * when fetching the interrupt redirection bitmap in the real-mode
 * tss, this doesn't happen.  So we do it ourselves.
 */
static void fixup_rmode_irq(struct vcpu_vmx *vmx)
{
	vmx->rmode.irq.pending = 0;
	if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
		return;
	kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
	if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
		vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
		vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
		return;
	}
	vmx->idt_vectoring_info =
		VECTORING_INFO_VALID_MASK
		| INTR_TYPE_EXT_INTR
		| vmx->rmode.irq.vector;
}

#ifdef CONFIG_X86_64
#define R "r"
#define Q "q"
#else
#define R "e"
#define Q "l"
#endif

static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (enable_ept && is_paging(vcpu)) {
		vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
		ept_load_pdptrs(vcpu);
	}
	/* Record the guest's net vcpu time for enforced NMI injections. */
	if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
		vmx->entry_time = ktime_get();

	/* Don't enter VMX if guest state is invalid; let the exit handler
	   start emulation until we arrive back at a valid state. */
	if (vmx->emulation_required && emulate_invalid_guest_state)
		return;

	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);

	/* When single-stepping over STI and MOV SS, we must clear the
	 * corresponding interruptibility bits in the guest state.  Otherwise
	 * vmentry fails as it then expects bit 14 (BS) in pending debug
	 * exceptions being set, but that's not correct for the guest debugging
	 * case. */
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

	/*
	 * Loading guest fpu may have cleared host cr0.ts
	 */
	vmcs_writel(HOST_CR0, read_cr0());

	if (vcpu->arch.switch_db_regs)
		set_debugreg(vcpu->arch.dr6, 6);

	asm(
		/* Store host registers */
		"push %%"R"dx; push %%"R"bp;"
		"push %%"R"cx \n\t"
		"cmp %%"R"sp, %c[host_rsp](%0) \n\t"
		"je 1f \n\t"
		"mov %%"R"sp, %c[host_rsp](%0) \n\t"
		__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
		"1: \n\t"
		/* Reload cr2 if changed */
		"mov %c[cr2](%0), %%"R"ax \n\t"
		"mov %%cr2, %%"R"dx \n\t"
		"cmp %%"R"ax, %%"R"dx \n\t"
		"je 2f \n\t"
		"mov %%"R"ax, %%cr2 \n\t"
		"2: \n\t"
		/* Check if vmlaunch or vmresume is needed */
		"cmpl $0, %c[launched](%0) \n\t"
		/* Load guest registers.  Don't clobber flags. */
		"mov %c[rax](%0), %%"R"ax \n\t"
		"mov %c[rbx](%0), %%"R"bx \n\t"
		"mov %c[rdx](%0), %%"R"dx \n\t"
		"mov %c[rsi](%0), %%"R"si \n\t"
		"mov %c[rdi](%0), %%"R"di \n\t"
		"mov %c[rbp](%0), %%"R"bp \n\t"
#ifdef CONFIG_X86_64
		"mov %c[r8](%0),  %%r8  \n\t"
		"mov %c[r9](%0),  %%r9  \n\t"
		"mov %c[r10](%0), %%r10 \n\t"
		"mov %c[r11](%0), %%r11 \n\t"
		"mov %c[r12](%0), %%r12 \n\t"
		"mov %c[r13](%0), %%r13 \n\t"
		"mov %c[r14](%0), %%r14 \n\t"
		"mov %c[r15](%0), %%r15 \n\t"
#endif
		"mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */

		/* Enter guest mode */
		"jne .Llaunched \n\t"
		__ex(ASM_VMX_VMLAUNCH) "\n\t"
		"jmp .Lkvm_vmx_return \n\t"
		".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
		".Lkvm_vmx_return: "
		/* Save guest registers, load host registers, keep flags */
		"xchg %0,     (%%"R"sp) \n\t"
		"mov %%"R"ax, %c[rax](%0) \n\t"
		"mov %%"R"bx, %c[rbx](%0) \n\t"
		"push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
		"mov %%"R"dx, %c[rdx](%0) \n\t"
		"mov %%"R"si, %c[rsi](%0) \n\t"
		"mov %%"R"di, %c[rdi](%0) \n\t"
		"mov %%"R"bp, %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
		"mov %%r8,  %c[r8](%0) \n\t"
		"mov %%r9,  %c[r9](%0) \n\t"
		"mov %%r10, %c[r10](%0) \n\t"
		"mov %%r11, %c[r11](%0) \n\t"
		"mov %%r12, %c[r12](%0) \n\t"
		"mov %%r13, %c[r13](%0) \n\t"
		"mov %%r14, %c[r14](%0) \n\t"
		"mov %%r15, %c[r15](%0) \n\t"
#endif
		"mov %%cr2, %%"R"ax   \n\t"
		"mov %%"R"ax, %c[cr2](%0) \n\t"

		"pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
		"setbe %c[fail](%0) \n\t"
	      : : "c"(vmx), "d"((unsigned long)HOST_RSP),
		[launched]"i"(offsetof(struct vcpu_vmx, launched)),
		[fail]"i"(offsetof(struct vcpu_vmx, fail)),
		[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
		[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
		[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
		[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
		[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
		[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
		[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
		[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
		[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
		[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
		[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
		[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
		[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
		[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
		[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
		[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
		[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
	      : "cc", "memory"
		, R"bx", R"di", R"si"
#ifdef CONFIG_X86_64
		, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#endif
	      );

	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_PDPTR));
	vcpu->arch.regs_dirty = 0;

	if (vcpu->arch.switch_db_regs)
		get_debugreg(vcpu->arch.dr6, 6);

	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
	if (vmx->rmode.irq.pending)
		fixup_rmode_irq(vmx);

	asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
	vmx->launched = 1;

	vmx_complete_interrupts(vmx);
}
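
/*
 * Illustrative summary (not part of the original file) of the asm blob
 * above, in pseudo-C:
 *
 *	save host rsp (and refresh HOST_RSP in the VMCS if it moved);
 *	reload %cr2 only if the guest value differs (mov to cr2 is slow);
 *	load guest GPRs from vcpu->arch.regs[];
 *	vmx->launched ? VMRESUME : VMLAUNCH;	// enter the guest
 *	// ... guest runs until the next VM exit ...
 *	store guest GPRs back into vcpu->arch.regs[];
 *	save guest %cr2;
 *	vmx->fail = CF || ZF;	// what "setbe" records on a failed entry
 */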

#undef R
#undef Q

static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->vmcs) {
		vcpu_clear(vmx);
		free_vmcs(vmx->vmcs);
		vmx->vmcs = NULL;
	}
}

static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	spin_lock(&vmx_vpid_lock);
	if (vmx->vpid != 0)
		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
	spin_unlock(&vmx_vpid_lock);
	vmx_free_vmcs(vcpu);
	kfree(vmx->guest_msrs);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vmx);
}

static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
	int err;
	struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	int cpu;

	if (!vmx)
		return ERR_PTR(-ENOMEM);

	allocate_vpid(vmx);

	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vmx->guest_msrs) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	vmx->vmcs = alloc_vmcs();
	if (!vmx->vmcs) {
		err = -ENOMEM;
		goto free_msrs;
	}

	vmcs_clear(vmx->vmcs);

	cpu = get_cpu();
	vmx_vcpu_load(&vmx->vcpu, cpu);
	err = vmx_vcpu_setup(vmx);
	vmx_vcpu_put(&vmx->vcpu);
	put_cpu();
	if (err)
		goto free_vmcs;
	if (vm_need_virtualize_apic_accesses(kvm))
		if (alloc_apic_access_page(kvm) != 0)
			goto free_vmcs;

	if (enable_ept) {
		if (!kvm->arch.ept_identity_map_addr)
			kvm->arch.ept_identity_map_addr =
				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
		if (alloc_identity_pagetable(kvm) != 0)
			goto free_vmcs;
	}

	return &vmx->vcpu;

free_vmcs:
	free_vmcs(vmx->vmcs);
free_msrs:
	kfree(vmx->guest_msrs);
uninit_vcpu:
	kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vmx);
	return ERR_PTR(err);
}

static void __init vmx_check_processor_compat(void *rtn)
{
	struct vmcs_config vmcs_conf;

	*(int *)rtn = 0;
	if (setup_vmcs_config(&vmcs_conf) < 0)
		*(int *)rtn = -EIO;
	if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
		printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
		       smp_processor_id());
		*(int *)rtn = -EIO;
	}
}

static int get_ept_level(void)
{
	return VMX_EPT_DEFAULT_GAW + 1;
}
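
/*
 * Illustrative note (an assumption from this era's vmx headers, where
 * VMX_EPT_DEFAULT_GAW is 3): a guest-address-width index of 3 denotes
 * 48-bit guest-physical addressing, walked with 3 + 1 = 4 page-table
 * levels -- hence the "+ 1" above.
 */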

static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
	u64 ret;

	/* For the VT-d and EPT combination:
	 * 1. MMIO: always map as UC.
	 * 2. EPT with VT-d:
	 *    a. VT-d without the snooping-control feature: we can't guarantee
	 *       the result, so try to trust the guest.
	 *    b. VT-d with the snooping-control feature: the VT-d engine's
	 *       snooping control guarantees cache correctness, so set WB to
	 *       stay consistent with the host (same as item 3).
	 * 3. EPT without VT-d: always map as WB and set IGMT=1 to stay
	 *    consistent with the host MTRRs.
	 */
	if (is_mmio)
		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
	else if (vcpu->kvm->arch.iommu_domain &&
		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
		ret = kvm_get_guest_memory_type(vcpu, gfn) <<
		      VMX_EPT_MT_EPTE_SHIFT;
	else
		ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
			| VMX_EPT_IGMT_BIT;

	return ret;
}
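
/*
 * Illustrative sketch (not part of the original file): the memory type
 * chosen above lands in bits 5:3 of the EPT entry
 * (VMX_EPT_MT_EPTE_SHIFT is 3), e.g. write-back for ordinary RAM.
 * demo_wb_epte_mt_bits() is hypothetical.
 */
#if 0
static u64 demo_wb_epte_mt_bits(void)
{
	/* MTRR_TYPE_WRBACK (6) << 3 == 0x30, plus "ignore guest memory
	 * type" so the guest's PAT cannot override it. */
	return ((u64)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
		| VMX_EPT_IGMT_BIT;
}
#endif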

static const struct trace_print_flags vmx_exit_reasons_str[] = {
	{ EXIT_REASON_EXCEPTION_NMI,           "exception" },
	{ EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
	{ EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
	{ EXIT_REASON_NMI_WINDOW,              "nmi_window" },
	{ EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
	{ EXIT_REASON_CR_ACCESS,               "cr_access" },
	{ EXIT_REASON_DR_ACCESS,               "dr_access" },
	{ EXIT_REASON_CPUID,                   "cpuid" },
	{ EXIT_REASON_MSR_READ,                "rdmsr" },
	{ EXIT_REASON_MSR_WRITE,               "wrmsr" },
	{ EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
	{ EXIT_REASON_HLT,                     "halt" },
	{ EXIT_REASON_INVLPG,                  "invlpg" },
	{ EXIT_REASON_VMCALL,                  "hypercall" },
	{ EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
	{ EXIT_REASON_APIC_ACCESS,             "apic_access" },
	{ EXIT_REASON_WBINVD,                  "wbinvd" },
	{ EXIT_REASON_TASK_SWITCH,             "task_switch" },
	{ EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
	{ -1, NULL }
};

static bool vmx_gb_page_enable(void)
{
	return false;
}

static struct kvm_x86_ops vmx_x86_ops = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
	.hardware_setup = hardware_setup,
	.hardware_unsetup = hardware_unsetup,
	.check_processor_compatibility = vmx_check_processor_compat,
	.hardware_enable = hardware_enable,
	.hardware_disable = hardware_disable,
	.cpu_has_accelerated_tpr = report_flexpriority,

	.vcpu_create = vmx_create_vcpu,
	.vcpu_free = vmx_free_vcpu,
	.vcpu_reset = vmx_vcpu_reset,

	.prepare_guest_switch = vmx_save_host_state,
	.vcpu_load = vmx_vcpu_load,
	.vcpu_put = vmx_vcpu_put,

	.set_guest_debug = set_guest_debug,
	.get_msr = vmx_get_msr,
	.set_msr = vmx_set_msr,
	.get_segment_base = vmx_get_segment_base,
	.get_segment = vmx_get_segment,
	.set_segment = vmx_set_segment,
	.get_cpl = vmx_get_cpl,
	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
	.set_cr0 = vmx_set_cr0,
	.set_cr3 = vmx_set_cr3,
	.set_cr4 = vmx_set_cr4,
	.set_efer = vmx_set_efer,
	.get_idt = vmx_get_idt,
	.set_idt = vmx_set_idt,
	.get_gdt = vmx_get_gdt,
	.set_gdt = vmx_set_gdt,
	.cache_reg = vmx_cache_reg,
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,
	.handle_exit = vmx_handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = vmx_set_interrupt_shadow,
	.get_interrupt_shadow = vmx_get_interrupt_shadow,
	.patch_hypercall = vmx_patch_hypercall,
	.set_irq = vmx_inject_irq,
	.set_nmi = vmx_inject_nmi,
	.queue_exception = vmx_queue_exception,
	.interrupt_allowed = vmx_interrupt_allowed,
	.nmi_allowed = vmx_nmi_allowed,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,

	.set_tss_addr = vmx_set_tss_addr,
	.get_tdp_level = get_ept_level,
	.get_mt_mask = vmx_get_mt_mask,

	.exit_reasons_str = vmx_exit_reasons_str,
	.gb_page_enable = vmx_gb_page_enable,
};

static int __init vmx_init(void)
{
	int r, i;

	rdmsrl_safe(MSR_EFER, &host_efer);

	for (i = 0; i < NR_VMX_MSR; ++i)
		kvm_define_shared_msr(i, vmx_msr_index[i]);

	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_a)
		return -ENOMEM;

	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_io_bitmap_b) {
		r = -ENOMEM;
		goto out;
	}

	vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_legacy) {
		r = -ENOMEM;
		goto out1;
	}

	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
	if (!vmx_msr_bitmap_longmode) {
		r = -ENOMEM;
		goto out2;
	}

	/*
	 * Allow direct access to the PC debug port (it is often used for I/O
	 * delays, but the vmexits simply slow things down).
	 */
	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
	clear_bit(0x80, vmx_io_bitmap_a);

	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);

	memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
	memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);

	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */

	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
	if (r)
		goto out3;

	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);

	if (enable_ept) {
		bypass_guest_pf = 0;
		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
			VMX_EPT_WRITABLE_MASK);
		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
				VMX_EPT_EXECUTABLE_MASK);
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (bypass_guest_pf)
		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);

	return 0;

out3:
	free_page((unsigned long)vmx_msr_bitmap_longmode);
out2:
	free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
	free_page((unsigned long)vmx_io_bitmap_b);
out:
	free_page((unsigned long)vmx_io_bitmap_a);
	return r;
}

static void __exit vmx_exit(void)
{
	free_page((unsigned long)vmx_msr_bitmap_legacy);
	free_page((unsigned long)vmx_msr_bitmap_longmode);
	free_page((unsigned long)vmx_io_bitmap_b);
	free_page((unsigned long)vmx_io_bitmap_a);

	kvm_exit();
}

module_init(vmx_init)
module_exit(vmx_exit)