| 1 | /* |
| 2 | * Kernel-based Virtual Machine driver for Linux |
| 3 | * |
| 4 | * This module enables machines with Intel VT-x extensions to run virtual |
| 5 | * machines without emulation or binary translation. |
| 6 | * |
| 7 | * Copyright (C) 2006 Qumranet, Inc. |
| 8 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
| 9 | * |
| 10 | * Authors: |
| 11 | * Avi Kivity <avi@qumranet.com> |
| 12 | * Yaniv Kamay <yaniv@qumranet.com> |
| 13 | * |
| 14 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 15 | * the COPYING file in the top-level directory. |
| 16 | * |
| 17 | */ |
| 18 | |
| 19 | #include "irq.h" |
| 20 | #include "mmu.h" |
| 21 | |
| 22 | #include <linux/kvm_host.h> |
| 23 | #include <linux/module.h> |
| 24 | #include <linux/kernel.h> |
| 25 | #include <linux/mm.h> |
| 26 | #include <linux/highmem.h> |
| 27 | #include <linux/sched.h> |
| 28 | #include <linux/moduleparam.h> |
| 29 | #include <linux/ftrace_event.h> |
| 30 | #include <linux/slab.h> |
| 31 | #include <linux/tboot.h> |
| 32 | #include "kvm_cache_regs.h" |
| 33 | #include "x86.h" |
| 34 | |
| 35 | #include <asm/io.h> |
| 36 | #include <asm/desc.h> |
| 37 | #include <asm/vmx.h> |
| 38 | #include <asm/virtext.h> |
| 39 | #include <asm/mce.h> |
| 40 | #include <asm/i387.h> |
| 41 | #include <asm/xcr.h> |
| 42 | |
| 43 | #include "trace.h" |
| 44 | |
| 45 | #define __ex(x) __kvm_handle_fault_on_reboot(x) |
| 46 | |
| 47 | MODULE_AUTHOR("Qumranet"); |
| 48 | MODULE_LICENSE("GPL"); |
| 49 | |
| 50 | static int __read_mostly bypass_guest_pf = 1; |
| 51 | module_param(bypass_guest_pf, bool, S_IRUGO); |
| 52 | |
| 53 | static int __read_mostly enable_vpid = 1; |
| 54 | module_param_named(vpid, enable_vpid, bool, 0444); |
| 55 | |
| 56 | static int __read_mostly flexpriority_enabled = 1; |
| 57 | module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO); |
| 58 | |
| 59 | static int __read_mostly enable_ept = 1; |
| 60 | module_param_named(ept, enable_ept, bool, S_IRUGO); |
| 61 | |
| 62 | static int __read_mostly enable_unrestricted_guest = 1; |
| 63 | module_param_named(unrestricted_guest, |
| 64 | enable_unrestricted_guest, bool, S_IRUGO); |
| 65 | |
| 66 | static int __read_mostly emulate_invalid_guest_state = 0; |
| 67 | module_param(emulate_invalid_guest_state, bool, S_IRUGO); |
| 68 | |
| 69 | static int __read_mostly vmm_exclusive = 1; |
| 70 | module_param(vmm_exclusive, bool, S_IRUGO); |
| 71 | |
| 72 | #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \ |
| 73 | (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD) |
| 74 | #define KVM_GUEST_CR0_MASK \ |
| 75 | (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) |
| 76 | #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \ |
| 77 | (X86_CR0_WP | X86_CR0_NE) |
| 78 | #define KVM_VM_CR0_ALWAYS_ON \ |
| 79 | (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE) |
| 80 | #define KVM_CR4_GUEST_OWNED_BITS \ |
| 81 | (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \ |
| 82 | | X86_CR4_OSXMMEXCPT) |
| 83 | |
| 84 | #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE) |
| 85 | #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE) |
| 86 | |
| 87 | #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM)) |
| 88 | |
| 89 | /* |
| 90 | * These 2 parameters are used to config the controls for Pause-Loop Exiting: |
| 91 | * ple_gap: upper bound on the amount of time between two successive |
| 92 | * executions of PAUSE in a loop. Also indicate if ple enabled. |
| 93 | * According to test, this time is usually small than 41 cycles. |
| 94 | * ple_window: upper bound on the amount of time a guest is allowed to execute |
| 95 | * in a PAUSE loop. Tests indicate that most spinlocks are held for |
| 96 | * less than 2^12 cycles |
| 97 | * Time is measured based on a counter that runs at the same rate as the TSC, |
| 98 | * refer SDM volume 3b section 21.6.13 & 22.1.3. |
| 99 | */ |
| 100 | #define KVM_VMX_DEFAULT_PLE_GAP 41 |
| 101 | #define KVM_VMX_DEFAULT_PLE_WINDOW 4096 |
| 102 | static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP; |
| 103 | module_param(ple_gap, int, S_IRUGO); |
| 104 | |
| 105 | static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; |
| 106 | module_param(ple_window, int, S_IRUGO); |
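
/*
 * Illustrative summary (not a code path): when PLE is available, these two
 * values are programmed into the PLE_GAP and PLE_WINDOW VMCS fields at vcpu
 * setup time. A guest spinning on PAUSE for more than ~ple_window TSC-rate
 * cycles, with successive PAUSEs no more than ~ple_gap cycles apart, then
 * triggers a pause-loop VM exit (EXIT_REASON_PAUSE_INSTRUCTION) so the host
 * can reschedule another vcpu.
 */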
| 107 | |
| 108 | #define NR_AUTOLOAD_MSRS 1 |
| 109 | |
| 110 | struct vmcs { |
| 111 | u32 revision_id; |
| 112 | u32 abort; |
| 113 | char data[0]; |
| 114 | }; |
| 115 | |
| 116 | struct shared_msr_entry { |
| 117 | unsigned index; |
| 118 | u64 data; |
| 119 | u64 mask; |
| 120 | }; |
| 121 | |
| 122 | struct vcpu_vmx { |
| 123 | struct kvm_vcpu vcpu; |
| 124 | struct list_head local_vcpus_link; |
| 125 | unsigned long host_rsp; |
| 126 | int launched; |
| 127 | u8 fail; |
| 128 | u32 exit_intr_info; |
| 129 | u32 idt_vectoring_info; |
| 130 | struct shared_msr_entry *guest_msrs; |
| 131 | int nmsrs; |
| 132 | int save_nmsrs; |
| 133 | #ifdef CONFIG_X86_64 |
| 134 | u64 msr_host_kernel_gs_base; |
| 135 | u64 msr_guest_kernel_gs_base; |
| 136 | #endif |
| 137 | struct vmcs *vmcs; |
| 138 | struct msr_autoload { |
| 139 | unsigned nr; |
| 140 | struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS]; |
| 141 | struct vmx_msr_entry host[NR_AUTOLOAD_MSRS]; |
| 142 | } msr_autoload; |
| 143 | struct { |
| 144 | int loaded; |
| 145 | u16 fs_sel, gs_sel, ldt_sel; |
| 146 | int gs_ldt_reload_needed; |
| 147 | int fs_reload_needed; |
| 148 | } host_state; |
| 149 | struct { |
| 150 | int vm86_active; |
| 151 | ulong save_rflags; |
| 152 | struct kvm_save_segment { |
| 153 | u16 selector; |
| 154 | unsigned long base; |
| 155 | u32 limit; |
| 156 | u32 ar; |
| 157 | } tr, es, ds, fs, gs; |
| 158 | } rmode; |
| 159 | int vpid; |
| 160 | bool emulation_required; |
| 161 | |
| 162 | /* Support for vnmi-less CPUs */ |
| 163 | int soft_vnmi_blocked; |
| 164 | ktime_t entry_time; |
| 165 | s64 vnmi_blocked_time; |
| 166 | u32 exit_reason; |
| 167 | |
| 168 | bool rdtscp_enabled; |
| 169 | }; |
| 170 | |
| 171 | static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) |
| 172 | { |
| 173 | return container_of(vcpu, struct vcpu_vmx, vcpu); |
| 174 | } |
| 175 | |
| 176 | static int init_rmode(struct kvm *kvm); |
| 177 | static u64 construct_eptp(unsigned long root_hpa); |
| 178 | static void kvm_cpu_vmxon(u64 addr); |
| 179 | static void kvm_cpu_vmxoff(void); |
| 180 | |
| 181 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
| 182 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); |
| 183 | static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu); |
| 184 | static DEFINE_PER_CPU(struct desc_ptr, host_gdt); |
| 185 | |
| 186 | static unsigned long *vmx_io_bitmap_a; |
| 187 | static unsigned long *vmx_io_bitmap_b; |
| 188 | static unsigned long *vmx_msr_bitmap_legacy; |
| 189 | static unsigned long *vmx_msr_bitmap_longmode; |
| 190 | |
| 191 | static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); |
| 192 | static DEFINE_SPINLOCK(vmx_vpid_lock); |
| 193 | |
| 194 | static struct vmcs_config { |
| 195 | int size; |
| 196 | int order; |
| 197 | u32 revision_id; |
| 198 | u32 pin_based_exec_ctrl; |
| 199 | u32 cpu_based_exec_ctrl; |
| 200 | u32 cpu_based_2nd_exec_ctrl; |
| 201 | u32 vmexit_ctrl; |
| 202 | u32 vmentry_ctrl; |
| 203 | } vmcs_config; |
| 204 | |
| 205 | static struct vmx_capability { |
| 206 | u32 ept; |
| 207 | u32 vpid; |
| 208 | } vmx_capability; |
| 209 | |
| 210 | #define VMX_SEGMENT_FIELD(seg) \ |
| 211 | [VCPU_SREG_##seg] = { \ |
| 212 | .selector = GUEST_##seg##_SELECTOR, \ |
| 213 | .base = GUEST_##seg##_BASE, \ |
| 214 | .limit = GUEST_##seg##_LIMIT, \ |
| 215 | .ar_bytes = GUEST_##seg##_AR_BYTES, \ |
| 216 | } |
| 217 | |
| 218 | static struct kvm_vmx_segment_field { |
| 219 | unsigned selector; |
| 220 | unsigned base; |
| 221 | unsigned limit; |
| 222 | unsigned ar_bytes; |
| 223 | } kvm_vmx_segment_fields[] = { |
| 224 | VMX_SEGMENT_FIELD(CS), |
| 225 | VMX_SEGMENT_FIELD(DS), |
| 226 | VMX_SEGMENT_FIELD(ES), |
| 227 | VMX_SEGMENT_FIELD(FS), |
| 228 | VMX_SEGMENT_FIELD(GS), |
| 229 | VMX_SEGMENT_FIELD(SS), |
| 230 | VMX_SEGMENT_FIELD(TR), |
| 231 | VMX_SEGMENT_FIELD(LDTR), |
| 232 | }; |
| 233 | |
| 234 | static u64 host_efer; |
| 235 | |
| 236 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu); |
| 237 | |
| 238 | /* |
| 239 | * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it |
| 240 | * away by decrementing the array size. |
| 241 | */ |
| 242 | static const u32 vmx_msr_index[] = { |
| 243 | #ifdef CONFIG_X86_64 |
| 244 | MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, |
| 245 | #endif |
| 246 | MSR_EFER, MSR_TSC_AUX, MSR_STAR, |
| 247 | }; |
| 248 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) |
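
/*
 * Note: setup_msrs() below packs the entries needed for the current guest
 * mode to the front of guest_msrs[] and records the count in save_nmsrs,
 * so the save/restore loops only ever touch live entries.
 */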
| 249 | |
| 250 | static inline bool is_page_fault(u32 intr_info) |
| 251 | { |
| 252 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | |
| 253 | INTR_INFO_VALID_MASK)) == |
| 254 | (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK); |
| 255 | } |
| 256 | |
| 257 | static inline bool is_no_device(u32 intr_info) |
| 258 | { |
| 259 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | |
| 260 | INTR_INFO_VALID_MASK)) == |
| 261 | (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK); |
| 262 | } |
| 263 | |
| 264 | static inline bool is_invalid_opcode(u32 intr_info) |
| 265 | { |
| 266 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | |
| 267 | INTR_INFO_VALID_MASK)) == |
| 268 | (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK); |
| 269 | } |
| 270 | |
| 271 | static inline bool is_external_interrupt(u32 intr_info) |
| 272 | { |
| 273 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) |
| 274 | == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK); |
| 275 | } |
| 276 | |
| 277 | static inline bool is_machine_check(u32 intr_info) |
| 278 | { |
| 279 | return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK | |
| 280 | INTR_INFO_VALID_MASK)) == |
| 281 | (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); |
| 282 | } |
| 283 | |
| 284 | static inline bool cpu_has_vmx_msr_bitmap(void) |
| 285 | { |
| 286 | return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; |
| 287 | } |
| 288 | |
| 289 | static inline bool cpu_has_vmx_tpr_shadow(void) |
| 290 | { |
| 291 | return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW; |
| 292 | } |
| 293 | |
| 294 | static inline bool vm_need_tpr_shadow(struct kvm *kvm) |
| 295 | { |
	return cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm);
| 297 | } |
| 298 | |
| 299 | static inline bool cpu_has_secondary_exec_ctrls(void) |
| 300 | { |
| 301 | return vmcs_config.cpu_based_exec_ctrl & |
| 302 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; |
| 303 | } |
| 304 | |
| 305 | static inline bool cpu_has_vmx_virtualize_apic_accesses(void) |
| 306 | { |
| 307 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 308 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
| 309 | } |
| 310 | |
| 311 | static inline bool cpu_has_vmx_flexpriority(void) |
| 312 | { |
| 313 | return cpu_has_vmx_tpr_shadow() && |
| 314 | cpu_has_vmx_virtualize_apic_accesses(); |
| 315 | } |
| 316 | |
| 317 | static inline bool cpu_has_vmx_ept_execute_only(void) |
| 318 | { |
| 319 | return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT; |
| 320 | } |
| 321 | |
| 322 | static inline bool cpu_has_vmx_eptp_uncacheable(void) |
| 323 | { |
| 324 | return vmx_capability.ept & VMX_EPTP_UC_BIT; |
| 325 | } |
| 326 | |
| 327 | static inline bool cpu_has_vmx_eptp_writeback(void) |
| 328 | { |
| 329 | return vmx_capability.ept & VMX_EPTP_WB_BIT; |
| 330 | } |
| 331 | |
| 332 | static inline bool cpu_has_vmx_ept_2m_page(void) |
| 333 | { |
| 334 | return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT; |
| 335 | } |
| 336 | |
| 337 | static inline bool cpu_has_vmx_ept_1g_page(void) |
| 338 | { |
| 339 | return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT; |
| 340 | } |
| 341 | |
| 342 | static inline bool cpu_has_vmx_ept_4levels(void) |
| 343 | { |
| 344 | return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT; |
| 345 | } |
| 346 | |
| 347 | static inline bool cpu_has_vmx_invept_individual_addr(void) |
| 348 | { |
| 349 | return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT; |
| 350 | } |
| 351 | |
| 352 | static inline bool cpu_has_vmx_invept_context(void) |
| 353 | { |
| 354 | return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT; |
| 355 | } |
| 356 | |
| 357 | static inline bool cpu_has_vmx_invept_global(void) |
| 358 | { |
| 359 | return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT; |
| 360 | } |
| 361 | |
| 362 | static inline bool cpu_has_vmx_invvpid_single(void) |
| 363 | { |
| 364 | return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT; |
| 365 | } |
| 366 | |
| 367 | static inline bool cpu_has_vmx_invvpid_global(void) |
| 368 | { |
| 369 | return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; |
| 370 | } |
| 371 | |
| 372 | static inline bool cpu_has_vmx_ept(void) |
| 373 | { |
| 374 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 375 | SECONDARY_EXEC_ENABLE_EPT; |
| 376 | } |
| 377 | |
| 378 | static inline bool cpu_has_vmx_unrestricted_guest(void) |
| 379 | { |
| 380 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 381 | SECONDARY_EXEC_UNRESTRICTED_GUEST; |
| 382 | } |
| 383 | |
| 384 | static inline bool cpu_has_vmx_ple(void) |
| 385 | { |
| 386 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 387 | SECONDARY_EXEC_PAUSE_LOOP_EXITING; |
| 388 | } |
| 389 | |
| 390 | static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm) |
| 391 | { |
| 392 | return flexpriority_enabled && irqchip_in_kernel(kvm); |
| 393 | } |
| 394 | |
| 395 | static inline bool cpu_has_vmx_vpid(void) |
| 396 | { |
| 397 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 398 | SECONDARY_EXEC_ENABLE_VPID; |
| 399 | } |
| 400 | |
| 401 | static inline bool cpu_has_vmx_rdtscp(void) |
| 402 | { |
| 403 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 404 | SECONDARY_EXEC_RDTSCP; |
| 405 | } |
| 406 | |
| 407 | static inline bool cpu_has_virtual_nmis(void) |
| 408 | { |
| 409 | return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS; |
| 410 | } |
| 411 | |
| 412 | static inline bool cpu_has_vmx_wbinvd_exit(void) |
| 413 | { |
| 414 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| 415 | SECONDARY_EXEC_WBINVD_EXITING; |
| 416 | } |
| 417 | |
| 418 | static inline bool report_flexpriority(void) |
| 419 | { |
| 420 | return flexpriority_enabled; |
| 421 | } |
| 422 | |
| 423 | static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) |
| 424 | { |
| 425 | int i; |
| 426 | |
| 427 | for (i = 0; i < vmx->nmsrs; ++i) |
| 428 | if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) |
| 429 | return i; |
| 430 | return -1; |
| 431 | } |
| 432 | |
| 433 | static inline void __invvpid(int ext, u16 vpid, gva_t gva) |
| 434 | { |
| 435 | struct { |
| 436 | u64 vpid : 16; |
| 437 | u64 rsvd : 48; |
| 438 | u64 gva; |
| 439 | } operand = { vpid, 0, gva }; |
| 440 | |
| 441 | asm volatile (__ex(ASM_VMX_INVVPID) |
		  /* CF==1 or ZF==1 --> VMfail; trap it with ud2 */
| 443 | "; ja 1f ; ud2 ; 1:" |
| 444 | : : "a"(&operand), "c"(ext) : "cc", "memory"); |
| 445 | } |
| 446 | |
| 447 | static inline void __invept(int ext, u64 eptp, gpa_t gpa) |
| 448 | { |
| 449 | struct { |
| 450 | u64 eptp, gpa; |
| 451 | } operand = {eptp, gpa}; |
| 452 | |
| 453 | asm volatile (__ex(ASM_VMX_INVEPT) |
			/* CF==1 or ZF==1 --> VMfail; trap it with ud2 */
| 455 | "; ja 1f ; ud2 ; 1:\n" |
| 456 | : : "a" (&operand), "c" (ext) : "cc", "memory"); |
| 457 | } |
| 458 | |
| 459 | static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) |
| 460 | { |
| 461 | int i; |
| 462 | |
| 463 | i = __find_msr_index(vmx, msr); |
| 464 | if (i >= 0) |
| 465 | return &vmx->guest_msrs[i]; |
| 466 | return NULL; |
| 467 | } |
| 468 | |
| 469 | static void vmcs_clear(struct vmcs *vmcs) |
| 470 | { |
| 471 | u64 phys_addr = __pa(vmcs); |
| 472 | u8 error; |
| 473 | |
| 474 | asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0" |
| 475 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) |
| 476 | : "cc", "memory"); |
| 477 | if (error) |
| 478 | printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n", |
| 479 | vmcs, phys_addr); |
| 480 | } |
| 481 | |
| 482 | static void vmcs_load(struct vmcs *vmcs) |
| 483 | { |
| 484 | u64 phys_addr = __pa(vmcs); |
| 485 | u8 error; |
| 486 | |
| 487 | asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0" |
| 488 | : "=g"(error) : "a"(&phys_addr), "m"(phys_addr) |
| 489 | : "cc", "memory"); |
| 490 | if (error) |
| 491 | printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n", |
| 492 | vmcs, phys_addr); |
| 493 | } |
| 494 | |
| 495 | static void __vcpu_clear(void *arg) |
| 496 | { |
| 497 | struct vcpu_vmx *vmx = arg; |
| 498 | int cpu = raw_smp_processor_id(); |
| 499 | |
| 500 | if (vmx->vcpu.cpu == cpu) |
| 501 | vmcs_clear(vmx->vmcs); |
| 502 | if (per_cpu(current_vmcs, cpu) == vmx->vmcs) |
| 503 | per_cpu(current_vmcs, cpu) = NULL; |
| 504 | list_del(&vmx->local_vcpus_link); |
| 505 | vmx->vcpu.cpu = -1; |
| 506 | vmx->launched = 0; |
| 507 | } |
| 508 | |
| 509 | static void vcpu_clear(struct vcpu_vmx *vmx) |
| 510 | { |
| 511 | if (vmx->vcpu.cpu == -1) |
| 512 | return; |
| 513 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); |
| 514 | } |
| 515 | |
| 516 | static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx) |
| 517 | { |
| 518 | if (vmx->vpid == 0) |
| 519 | return; |
| 520 | |
| 521 | if (cpu_has_vmx_invvpid_single()) |
| 522 | __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0); |
| 523 | } |
| 524 | |
| 525 | static inline void vpid_sync_vcpu_global(void) |
| 526 | { |
| 527 | if (cpu_has_vmx_invvpid_global()) |
| 528 | __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0); |
| 529 | } |
| 530 | |
| 531 | static inline void vpid_sync_context(struct vcpu_vmx *vmx) |
| 532 | { |
| 533 | if (cpu_has_vmx_invvpid_single()) |
| 534 | vpid_sync_vcpu_single(vmx); |
| 535 | else |
| 536 | vpid_sync_vcpu_global(); |
| 537 | } |
| 538 | |
| 539 | static inline void ept_sync_global(void) |
| 540 | { |
| 541 | if (cpu_has_vmx_invept_global()) |
| 542 | __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0); |
| 543 | } |
| 544 | |
| 545 | static inline void ept_sync_context(u64 eptp) |
| 546 | { |
| 547 | if (enable_ept) { |
| 548 | if (cpu_has_vmx_invept_context()) |
| 549 | __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0); |
| 550 | else |
| 551 | ept_sync_global(); |
| 552 | } |
| 553 | } |
| 554 | |
| 555 | static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa) |
| 556 | { |
| 557 | if (enable_ept) { |
| 558 | if (cpu_has_vmx_invept_individual_addr()) |
| 559 | __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR, |
| 560 | eptp, gpa); |
| 561 | else |
| 562 | ept_sync_context(eptp); |
| 563 | } |
| 564 | } |
| 565 | |
| 566 | static unsigned long vmcs_readl(unsigned long field) |
| 567 | { |
| 568 | unsigned long value; |
| 569 | |
| 570 | asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX) |
| 571 | : "=a"(value) : "d"(field) : "cc"); |
| 572 | return value; |
| 573 | } |
| 574 | |
| 575 | static u16 vmcs_read16(unsigned long field) |
| 576 | { |
| 577 | return vmcs_readl(field); |
| 578 | } |
| 579 | |
| 580 | static u32 vmcs_read32(unsigned long field) |
| 581 | { |
| 582 | return vmcs_readl(field); |
| 583 | } |
| 584 | |
| 585 | static u64 vmcs_read64(unsigned long field) |
| 586 | { |
| 587 | #ifdef CONFIG_X86_64 |
| 588 | return vmcs_readl(field); |
| 589 | #else |
| 590 | return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32); |
| 591 | #endif |
| 592 | } |
| 593 | |
| 594 | static noinline void vmwrite_error(unsigned long field, unsigned long value) |
| 595 | { |
| 596 | printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n", |
| 597 | field, value, vmcs_read32(VM_INSTRUCTION_ERROR)); |
| 598 | dump_stack(); |
| 599 | } |
| 600 | |
| 601 | static void vmcs_writel(unsigned long field, unsigned long value) |
| 602 | { |
| 603 | u8 error; |
| 604 | |
| 605 | asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0" |
| 606 | : "=q"(error) : "a"(value), "d"(field) : "cc"); |
| 607 | if (unlikely(error)) |
| 608 | vmwrite_error(field, value); |
| 609 | } |
| 610 | |
| 611 | static void vmcs_write16(unsigned long field, u16 value) |
| 612 | { |
| 613 | vmcs_writel(field, value); |
| 614 | } |
| 615 | |
| 616 | static void vmcs_write32(unsigned long field, u32 value) |
| 617 | { |
| 618 | vmcs_writel(field, value); |
| 619 | } |
| 620 | |
| 621 | static void vmcs_write64(unsigned long field, u64 value) |
| 622 | { |
| 623 | vmcs_writel(field, value); |
| 624 | #ifndef CONFIG_X86_64 |
	/*
	 * The empty asm is (presumably) a compiler barrier, keeping the
	 * two halves of the split 64-bit write from being merged or
	 * reordered on 32-bit hosts.
	 */
	asm volatile ("");
| 626 | vmcs_writel(field+1, value >> 32); |
| 627 | #endif |
| 628 | } |
| 629 | |
| 630 | static void vmcs_clear_bits(unsigned long field, u32 mask) |
| 631 | { |
| 632 | vmcs_writel(field, vmcs_readl(field) & ~mask); |
| 633 | } |
| 634 | |
| 635 | static void vmcs_set_bits(unsigned long field, u32 mask) |
| 636 | { |
| 637 | vmcs_writel(field, vmcs_readl(field) | mask); |
| 638 | } |
| 639 | |
| 640 | static void update_exception_bitmap(struct kvm_vcpu *vcpu) |
| 641 | { |
| 642 | u32 eb; |
| 643 | |
| 644 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | |
| 645 | (1u << NM_VECTOR) | (1u << DB_VECTOR); |
| 646 | if ((vcpu->guest_debug & |
| 647 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == |
| 648 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) |
| 649 | eb |= 1u << BP_VECTOR; |
| 650 | if (to_vmx(vcpu)->rmode.vm86_active) |
| 651 | eb = ~0; |
| 652 | if (enable_ept) |
| 653 | eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */ |
| 654 | if (vcpu->fpu_active) |
| 655 | eb &= ~(1u << NM_VECTOR); |
| 656 | vmcs_write32(EXCEPTION_BITMAP, eb); |
| 657 | } |
| 658 | |
| 659 | static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) |
| 660 | { |
| 661 | unsigned i; |
| 662 | struct msr_autoload *m = &vmx->msr_autoload; |
| 663 | |
| 664 | for (i = 0; i < m->nr; ++i) |
| 665 | if (m->guest[i].index == msr) |
| 666 | break; |
| 667 | |
| 668 | if (i == m->nr) |
| 669 | return; |
| 670 | --m->nr; |
| 671 | m->guest[i] = m->guest[m->nr]; |
| 672 | m->host[i] = m->host[m->nr]; |
| 673 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); |
| 674 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); |
| 675 | } |
| 676 | |
| 677 | static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, |
| 678 | u64 guest_val, u64 host_val) |
| 679 | { |
| 680 | unsigned i; |
| 681 | struct msr_autoload *m = &vmx->msr_autoload; |
| 682 | |
| 683 | for (i = 0; i < m->nr; ++i) |
| 684 | if (m->guest[i].index == msr) |
| 685 | break; |
| 686 | |
| 687 | if (i == m->nr) { |
| 688 | ++m->nr; |
| 689 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr); |
| 690 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr); |
| 691 | } |
| 692 | |
| 693 | m->guest[i].index = msr; |
| 694 | m->guest[i].value = guest_val; |
| 695 | m->host[i].index = msr; |
| 696 | m->host[i].value = host_val; |
| 697 | } |
| 698 | |
| 699 | static void reload_tss(void) |
| 700 | { |
| 701 | /* |
| 702 | * VT restores TR but not its size. Useless. |
| 703 | */ |
| 704 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); |
| 705 | struct desc_struct *descs; |
| 706 | |
| 707 | descs = (void *)gdt->address; |
| 708 | descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ |
| 709 | load_TR_desc(); |
| 710 | } |
| 711 | |
| 712 | static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) |
| 713 | { |
| 714 | u64 guest_efer; |
| 715 | u64 ignore_bits; |
| 716 | |
| 717 | guest_efer = vmx->vcpu.arch.efer; |
| 718 | |
| 719 | /* |
	 * NX is emulated; LMA and LME are handled by hardware; SCE is
	 * meaningless outside long mode.
| 722 | */ |
| 723 | ignore_bits = EFER_NX | EFER_SCE; |
| 724 | #ifdef CONFIG_X86_64 |
| 725 | ignore_bits |= EFER_LMA | EFER_LME; |
| 726 | /* SCE is meaningful only in long mode on Intel */ |
| 727 | if (guest_efer & EFER_LMA) |
| 728 | ignore_bits &= ~(u64)EFER_SCE; |
| 729 | #endif |
| 730 | guest_efer &= ~ignore_bits; |
| 731 | guest_efer |= host_efer & ignore_bits; |
| 732 | vmx->guest_msrs[efer_offset].data = guest_efer; |
| 733 | vmx->guest_msrs[efer_offset].mask = ~ignore_bits; |
| 734 | |
| 735 | clear_atomic_switch_msr(vmx, MSR_EFER); |
	/* With EPT, NX can't be emulated, so EFER must be switched atomically */
| 737 | if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) { |
| 738 | guest_efer = vmx->vcpu.arch.efer; |
| 739 | if (!(guest_efer & EFER_LMA)) |
| 740 | guest_efer &= ~EFER_LME; |
| 741 | add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer); |
| 742 | return false; |
| 743 | } |
| 744 | |
| 745 | return true; |
| 746 | } |
| 747 | |
| 748 | static unsigned long segment_base(u16 selector) |
| 749 | { |
| 750 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); |
| 751 | struct desc_struct *d; |
| 752 | unsigned long table_base; |
| 753 | unsigned long v; |
| 754 | |
| 755 | if (!(selector & ~3)) |
| 756 | return 0; |
| 757 | |
| 758 | table_base = gdt->address; |
| 759 | |
| 760 | if (selector & 4) { /* from ldt */ |
| 761 | u16 ldt_selector = kvm_read_ldt(); |
| 762 | |
| 763 | if (!(ldt_selector & ~3)) |
| 764 | return 0; |
| 765 | |
| 766 | table_base = segment_base(ldt_selector); |
| 767 | } |
| 768 | d = (struct desc_struct *)(table_base + (selector & ~7)); |
| 769 | v = get_desc_base(d); |
| 770 | #ifdef CONFIG_X86_64 |
| 771 | if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11)) |
| 772 | v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32; |
| 773 | #endif |
| 774 | return v; |
| 775 | } |
| 776 | |
| 777 | static inline unsigned long kvm_read_tr_base(void) |
| 778 | { |
| 779 | u16 tr; |
| 780 | asm("str %0" : "=g"(tr)); |
| 781 | return segment_base(tr); |
| 782 | } |
| 783 | |
| 784 | static void vmx_save_host_state(struct kvm_vcpu *vcpu) |
| 785 | { |
| 786 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 787 | int i; |
| 788 | |
| 789 | if (vmx->host_state.loaded) |
| 790 | return; |
| 791 | |
| 792 | vmx->host_state.loaded = 1; |
| 793 | /* |
| 794 | * Set host fs and gs selectors. Unfortunately, 22.2.3 does not |
| 795 | * allow segment selectors with cpl > 0 or ti == 1. |
| 796 | */ |
| 797 | vmx->host_state.ldt_sel = kvm_read_ldt(); |
| 798 | vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel; |
| 799 | savesegment(fs, vmx->host_state.fs_sel); |
| 800 | if (!(vmx->host_state.fs_sel & 7)) { |
| 801 | vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel); |
| 802 | vmx->host_state.fs_reload_needed = 0; |
| 803 | } else { |
| 804 | vmcs_write16(HOST_FS_SELECTOR, 0); |
| 805 | vmx->host_state.fs_reload_needed = 1; |
| 806 | } |
| 807 | savesegment(gs, vmx->host_state.gs_sel); |
| 808 | if (!(vmx->host_state.gs_sel & 7)) |
| 809 | vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel); |
| 810 | else { |
| 811 | vmcs_write16(HOST_GS_SELECTOR, 0); |
| 812 | vmx->host_state.gs_ldt_reload_needed = 1; |
| 813 | } |
| 814 | |
| 815 | #ifdef CONFIG_X86_64 |
| 816 | vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); |
| 817 | vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); |
| 818 | #else |
| 819 | vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel)); |
| 820 | vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel)); |
| 821 | #endif |
| 822 | |
| 823 | #ifdef CONFIG_X86_64 |
| 824 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
| 825 | if (is_long_mode(&vmx->vcpu)) |
| 826 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
| 827 | #endif |
| 828 | for (i = 0; i < vmx->save_nmsrs; ++i) |
| 829 | kvm_set_shared_msr(vmx->guest_msrs[i].index, |
| 830 | vmx->guest_msrs[i].data, |
| 831 | vmx->guest_msrs[i].mask); |
| 832 | } |
| 833 | |
| 834 | static void __vmx_load_host_state(struct vcpu_vmx *vmx) |
| 835 | { |
| 836 | if (!vmx->host_state.loaded) |
| 837 | return; |
| 838 | |
| 839 | ++vmx->vcpu.stat.host_state_reload; |
| 840 | vmx->host_state.loaded = 0; |
| 841 | #ifdef CONFIG_X86_64 |
| 842 | if (is_long_mode(&vmx->vcpu)) |
| 843 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
| 844 | #endif |
| 845 | if (vmx->host_state.gs_ldt_reload_needed) { |
| 846 | kvm_load_ldt(vmx->host_state.ldt_sel); |
| 847 | #ifdef CONFIG_X86_64 |
| 848 | load_gs_index(vmx->host_state.gs_sel); |
| 849 | #else |
| 850 | loadsegment(gs, vmx->host_state.gs_sel); |
| 851 | #endif |
| 852 | } |
| 853 | if (vmx->host_state.fs_reload_needed) |
| 854 | loadsegment(fs, vmx->host_state.fs_sel); |
| 855 | reload_tss(); |
| 856 | #ifdef CONFIG_X86_64 |
| 857 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
| 858 | #endif |
| 859 | if (current_thread_info()->status & TS_USEDFPU) |
| 860 | clts(); |
| 861 | load_gdt(&__get_cpu_var(host_gdt)); |
| 862 | } |
| 863 | |
| 864 | static void vmx_load_host_state(struct vcpu_vmx *vmx) |
| 865 | { |
| 866 | preempt_disable(); |
| 867 | __vmx_load_host_state(vmx); |
| 868 | preempt_enable(); |
| 869 | } |
| 870 | |
| 871 | /* |
 * Switches to the specified vcpu, until a matching vcpu_put(); assumes
 * the vcpu mutex is already taken.
| 874 | */ |
| 875 | static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 876 | { |
| 877 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 878 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
| 879 | |
| 880 | if (!vmm_exclusive) |
| 881 | kvm_cpu_vmxon(phys_addr); |
| 882 | else if (vcpu->cpu != cpu) |
| 883 | vcpu_clear(vmx); |
| 884 | |
| 885 | if (per_cpu(current_vmcs, cpu) != vmx->vmcs) { |
| 886 | per_cpu(current_vmcs, cpu) = vmx->vmcs; |
| 887 | vmcs_load(vmx->vmcs); |
| 888 | } |
| 889 | |
| 890 | if (vcpu->cpu != cpu) { |
| 891 | struct desc_ptr *gdt = &__get_cpu_var(host_gdt); |
| 892 | unsigned long sysenter_esp; |
| 893 | |
| 894 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
| 895 | local_irq_disable(); |
| 896 | list_add(&vmx->local_vcpus_link, |
| 897 | &per_cpu(vcpus_on_cpu, cpu)); |
| 898 | local_irq_enable(); |
| 899 | |
| 900 | /* |
| 901 | * Linux uses per-cpu TSS and GDT, so set these when switching |
| 902 | * processors. |
| 903 | */ |
| 904 | vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */ |
| 905 | vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */ |
| 906 | |
| 907 | rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); |
| 908 | vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ |
| 909 | } |
| 910 | } |
| 911 | |
| 912 | static void vmx_vcpu_put(struct kvm_vcpu *vcpu) |
| 913 | { |
| 914 | __vmx_load_host_state(to_vmx(vcpu)); |
| 915 | if (!vmm_exclusive) { |
| 916 | __vcpu_clear(to_vmx(vcpu)); |
| 917 | kvm_cpu_vmxoff(); |
| 918 | } |
| 919 | } |
| 920 | |
| 921 | static void vmx_fpu_activate(struct kvm_vcpu *vcpu) |
| 922 | { |
| 923 | ulong cr0; |
| 924 | |
| 925 | if (vcpu->fpu_active) |
| 926 | return; |
| 927 | vcpu->fpu_active = 1; |
| 928 | cr0 = vmcs_readl(GUEST_CR0); |
| 929 | cr0 &= ~(X86_CR0_TS | X86_CR0_MP); |
| 930 | cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP); |
| 931 | vmcs_writel(GUEST_CR0, cr0); |
| 932 | update_exception_bitmap(vcpu); |
| 933 | vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS; |
| 934 | vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); |
| 935 | } |
| 936 | |
| 937 | static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu); |
| 938 | |
| 939 | static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu) |
| 940 | { |
| 941 | vmx_decache_cr0_guest_bits(vcpu); |
| 942 | vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP); |
| 943 | update_exception_bitmap(vcpu); |
| 944 | vcpu->arch.cr0_guest_owned_bits = 0; |
| 945 | vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); |
| 946 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); |
| 947 | } |
| 948 | |
| 949 | static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu) |
| 950 | { |
| 951 | unsigned long rflags, save_rflags; |
| 952 | |
| 953 | rflags = vmcs_readl(GUEST_RFLAGS); |
| 954 | if (to_vmx(vcpu)->rmode.vm86_active) { |
| 955 | rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
| 956 | save_rflags = to_vmx(vcpu)->rmode.save_rflags; |
| 957 | rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
| 958 | } |
| 959 | return rflags; |
| 960 | } |
| 961 | |
| 962 | static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
| 963 | { |
| 964 | if (to_vmx(vcpu)->rmode.vm86_active) { |
| 965 | to_vmx(vcpu)->rmode.save_rflags = rflags; |
| 966 | rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
| 967 | } |
| 968 | vmcs_writel(GUEST_RFLAGS, rflags); |
| 969 | } |
| 970 | |
| 971 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
| 972 | { |
| 973 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
| 974 | int ret = 0; |
| 975 | |
| 976 | if (interruptibility & GUEST_INTR_STATE_STI) |
| 977 | ret |= KVM_X86_SHADOW_INT_STI; |
| 978 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) |
| 979 | ret |= KVM_X86_SHADOW_INT_MOV_SS; |
| 980 | |
| 981 | return ret & mask; |
| 982 | } |
| 983 | |
| 984 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
| 985 | { |
| 986 | u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
| 987 | u32 interruptibility = interruptibility_old; |
| 988 | |
| 989 | interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS); |
| 990 | |
| 991 | if (mask & KVM_X86_SHADOW_INT_MOV_SS) |
| 992 | interruptibility |= GUEST_INTR_STATE_MOV_SS; |
| 993 | else if (mask & KVM_X86_SHADOW_INT_STI) |
| 994 | interruptibility |= GUEST_INTR_STATE_STI; |
| 995 | |
	if (interruptibility != interruptibility_old)
| 997 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility); |
| 998 | } |
| 999 | |
| 1000 | static void skip_emulated_instruction(struct kvm_vcpu *vcpu) |
| 1001 | { |
| 1002 | unsigned long rip; |
| 1003 | |
| 1004 | rip = kvm_rip_read(vcpu); |
| 1005 | rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
| 1006 | kvm_rip_write(vcpu, rip); |
| 1007 | |
| 1008 | /* skipping an emulated instruction also counts */ |
| 1009 | vmx_set_interrupt_shadow(vcpu, 0); |
| 1010 | } |
| 1011 | |
| 1012 | static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr, |
| 1013 | bool has_error_code, u32 error_code, |
| 1014 | bool reinject) |
| 1015 | { |
| 1016 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1017 | u32 intr_info = nr | INTR_INFO_VALID_MASK; |
| 1018 | |
| 1019 | if (has_error_code) { |
| 1020 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code); |
| 1021 | intr_info |= INTR_INFO_DELIVER_CODE_MASK; |
| 1022 | } |
| 1023 | |
| 1024 | if (vmx->rmode.vm86_active) { |
| 1025 | if (kvm_inject_realmode_interrupt(vcpu, nr) != EMULATE_DONE) |
| 1026 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 1027 | return; |
| 1028 | } |
| 1029 | |
| 1030 | if (kvm_exception_is_soft(nr)) { |
| 1031 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
| 1032 | vmx->vcpu.arch.event_exit_inst_len); |
| 1033 | intr_info |= INTR_TYPE_SOFT_EXCEPTION; |
| 1034 | } else |
| 1035 | intr_info |= INTR_TYPE_HARD_EXCEPTION; |
| 1036 | |
| 1037 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info); |
| 1038 | } |
| 1039 | |
| 1040 | static bool vmx_rdtscp_supported(void) |
| 1041 | { |
| 1042 | return cpu_has_vmx_rdtscp(); |
| 1043 | } |
| 1044 | |
| 1045 | /* |
 * Swap two entries in the guest MSR save/restore array.
| 1047 | */ |
| 1048 | static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) |
| 1049 | { |
| 1050 | struct shared_msr_entry tmp; |
| 1051 | |
| 1052 | tmp = vmx->guest_msrs[to]; |
| 1053 | vmx->guest_msrs[to] = vmx->guest_msrs[from]; |
| 1054 | vmx->guest_msrs[from] = tmp; |
| 1055 | } |
| 1056 | |
| 1057 | /* |
| 1058 | * Set up the vmcs to automatically save and restore system |
| 1059 | * msrs. Don't touch the 64-bit msrs if the guest is in legacy |
| 1060 | * mode, as fiddling with msrs is very expensive. |
| 1061 | */ |
| 1062 | static void setup_msrs(struct vcpu_vmx *vmx) |
| 1063 | { |
| 1064 | int save_nmsrs, index; |
| 1065 | unsigned long *msr_bitmap; |
| 1066 | |
| 1067 | vmx_load_host_state(vmx); |
| 1068 | save_nmsrs = 0; |
| 1069 | #ifdef CONFIG_X86_64 |
| 1070 | if (is_long_mode(&vmx->vcpu)) { |
| 1071 | index = __find_msr_index(vmx, MSR_SYSCALL_MASK); |
| 1072 | if (index >= 0) |
| 1073 | move_msr_up(vmx, index, save_nmsrs++); |
| 1074 | index = __find_msr_index(vmx, MSR_LSTAR); |
| 1075 | if (index >= 0) |
| 1076 | move_msr_up(vmx, index, save_nmsrs++); |
| 1077 | index = __find_msr_index(vmx, MSR_CSTAR); |
| 1078 | if (index >= 0) |
| 1079 | move_msr_up(vmx, index, save_nmsrs++); |
| 1080 | index = __find_msr_index(vmx, MSR_TSC_AUX); |
| 1081 | if (index >= 0 && vmx->rdtscp_enabled) |
| 1082 | move_msr_up(vmx, index, save_nmsrs++); |
| 1083 | /* |
| 1084 | * MSR_STAR is only needed on long mode guests, and only |
		 * if EFER.SCE is enabled.
| 1086 | */ |
| 1087 | index = __find_msr_index(vmx, MSR_STAR); |
| 1088 | if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) |
| 1089 | move_msr_up(vmx, index, save_nmsrs++); |
| 1090 | } |
| 1091 | #endif |
| 1092 | index = __find_msr_index(vmx, MSR_EFER); |
| 1093 | if (index >= 0 && update_transition_efer(vmx, index)) |
| 1094 | move_msr_up(vmx, index, save_nmsrs++); |
| 1095 | |
| 1096 | vmx->save_nmsrs = save_nmsrs; |
| 1097 | |
| 1098 | if (cpu_has_vmx_msr_bitmap()) { |
| 1099 | if (is_long_mode(&vmx->vcpu)) |
| 1100 | msr_bitmap = vmx_msr_bitmap_longmode; |
| 1101 | else |
| 1102 | msr_bitmap = vmx_msr_bitmap_legacy; |
| 1103 | |
| 1104 | vmcs_write64(MSR_BITMAP, __pa(msr_bitmap)); |
| 1105 | } |
| 1106 | } |
| 1107 | |
| 1108 | /* |
 * reads and returns the guest's timestamp counter "register":
 * guest_tsc = host_tsc + tsc_offset    -- SDM section 21.3
| 1111 | */ |
| 1112 | static u64 guest_read_tsc(void) |
| 1113 | { |
| 1114 | u64 host_tsc, tsc_offset; |
| 1115 | |
| 1116 | rdtscll(host_tsc); |
| 1117 | tsc_offset = vmcs_read64(TSC_OFFSET); |
| 1118 | return host_tsc + tsc_offset; |
| 1119 | } |
| 1120 | |
| 1121 | /* |
| 1122 | * writes 'offset' into guest's timestamp counter offset register |
| 1123 | */ |
| 1124 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| 1125 | { |
| 1126 | vmcs_write64(TSC_OFFSET, offset); |
| 1127 | } |
| 1128 | |
| 1129 | static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) |
| 1130 | { |
| 1131 | u64 offset = vmcs_read64(TSC_OFFSET); |
| 1132 | vmcs_write64(TSC_OFFSET, offset + adjustment); |
| 1133 | } |
| 1134 | |
| 1135 | /* |
| 1136 | * Reads an msr value (of 'msr_index') into 'pdata'. |
| 1137 | * Returns 0 on success, non-0 otherwise. |
| 1138 | * Assumes vcpu_load() was already called. |
| 1139 | */ |
| 1140 | static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) |
| 1141 | { |
| 1142 | u64 data; |
| 1143 | struct shared_msr_entry *msr; |
| 1144 | |
| 1145 | if (!pdata) { |
| 1146 | printk(KERN_ERR "BUG: get_msr called with NULL pdata\n"); |
| 1147 | return -EINVAL; |
| 1148 | } |
| 1149 | |
| 1150 | switch (msr_index) { |
| 1151 | #ifdef CONFIG_X86_64 |
| 1152 | case MSR_FS_BASE: |
| 1153 | data = vmcs_readl(GUEST_FS_BASE); |
| 1154 | break; |
| 1155 | case MSR_GS_BASE: |
| 1156 | data = vmcs_readl(GUEST_GS_BASE); |
| 1157 | break; |
| 1158 | case MSR_KERNEL_GS_BASE: |
| 1159 | vmx_load_host_state(to_vmx(vcpu)); |
| 1160 | data = to_vmx(vcpu)->msr_guest_kernel_gs_base; |
| 1161 | break; |
| 1162 | #endif |
| 1163 | case MSR_EFER: |
| 1164 | return kvm_get_msr_common(vcpu, msr_index, pdata); |
| 1165 | case MSR_IA32_TSC: |
| 1166 | data = guest_read_tsc(); |
| 1167 | break; |
| 1168 | case MSR_IA32_SYSENTER_CS: |
| 1169 | data = vmcs_read32(GUEST_SYSENTER_CS); |
| 1170 | break; |
| 1171 | case MSR_IA32_SYSENTER_EIP: |
| 1172 | data = vmcs_readl(GUEST_SYSENTER_EIP); |
| 1173 | break; |
| 1174 | case MSR_IA32_SYSENTER_ESP: |
| 1175 | data = vmcs_readl(GUEST_SYSENTER_ESP); |
| 1176 | break; |
| 1177 | case MSR_TSC_AUX: |
| 1178 | if (!to_vmx(vcpu)->rdtscp_enabled) |
| 1179 | return 1; |
| 1180 | /* Otherwise falls through */ |
| 1181 | default: |
| 1182 | vmx_load_host_state(to_vmx(vcpu)); |
| 1183 | msr = find_msr_entry(to_vmx(vcpu), msr_index); |
| 1184 | if (msr) { |
| 1186 | data = msr->data; |
| 1187 | break; |
| 1188 | } |
| 1189 | return kvm_get_msr_common(vcpu, msr_index, pdata); |
| 1190 | } |
| 1191 | |
| 1192 | *pdata = data; |
| 1193 | return 0; |
| 1194 | } |
| 1195 | |
| 1196 | /* |
 * Writes an MSR value into the appropriate "register".
| 1198 | * Returns 0 on success, non-0 otherwise. |
| 1199 | * Assumes vcpu_load() was already called. |
| 1200 | */ |
| 1201 | static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) |
| 1202 | { |
| 1203 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1204 | struct shared_msr_entry *msr; |
| 1205 | int ret = 0; |
| 1206 | |
| 1207 | switch (msr_index) { |
| 1208 | case MSR_EFER: |
| 1209 | vmx_load_host_state(vmx); |
| 1210 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
| 1211 | break; |
| 1212 | #ifdef CONFIG_X86_64 |
| 1213 | case MSR_FS_BASE: |
| 1214 | vmcs_writel(GUEST_FS_BASE, data); |
| 1215 | break; |
| 1216 | case MSR_GS_BASE: |
| 1217 | vmcs_writel(GUEST_GS_BASE, data); |
| 1218 | break; |
| 1219 | case MSR_KERNEL_GS_BASE: |
| 1220 | vmx_load_host_state(vmx); |
| 1221 | vmx->msr_guest_kernel_gs_base = data; |
| 1222 | break; |
| 1223 | #endif |
| 1224 | case MSR_IA32_SYSENTER_CS: |
| 1225 | vmcs_write32(GUEST_SYSENTER_CS, data); |
| 1226 | break; |
| 1227 | case MSR_IA32_SYSENTER_EIP: |
| 1228 | vmcs_writel(GUEST_SYSENTER_EIP, data); |
| 1229 | break; |
| 1230 | case MSR_IA32_SYSENTER_ESP: |
| 1231 | vmcs_writel(GUEST_SYSENTER_ESP, data); |
| 1232 | break; |
| 1233 | case MSR_IA32_TSC: |
| 1234 | kvm_write_tsc(vcpu, data); |
| 1235 | break; |
| 1236 | case MSR_IA32_CR_PAT: |
| 1237 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { |
| 1238 | vmcs_write64(GUEST_IA32_PAT, data); |
| 1239 | vcpu->arch.pat = data; |
| 1240 | break; |
| 1241 | } |
| 1242 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
| 1243 | break; |
| 1244 | case MSR_TSC_AUX: |
| 1245 | if (!vmx->rdtscp_enabled) |
| 1246 | return 1; |
		/* Check reserved bits: the upper 32 bits must be zero */
| 1248 | if ((data >> 32) != 0) |
| 1249 | return 1; |
| 1250 | /* Otherwise falls through */ |
| 1251 | default: |
| 1252 | msr = find_msr_entry(vmx, msr_index); |
| 1253 | if (msr) { |
| 1254 | vmx_load_host_state(vmx); |
| 1255 | msr->data = data; |
| 1256 | break; |
| 1257 | } |
| 1258 | ret = kvm_set_msr_common(vcpu, msr_index, data); |
| 1259 | } |
| 1260 | |
| 1261 | return ret; |
| 1262 | } |
| 1263 | |
| 1264 | static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg) |
| 1265 | { |
| 1266 | __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); |
| 1267 | switch (reg) { |
| 1268 | case VCPU_REGS_RSP: |
| 1269 | vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); |
| 1270 | break; |
| 1271 | case VCPU_REGS_RIP: |
| 1272 | vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); |
| 1273 | break; |
| 1274 | case VCPU_EXREG_PDPTR: |
| 1275 | if (enable_ept) |
| 1276 | ept_save_pdptrs(vcpu); |
| 1277 | break; |
| 1278 | default: |
| 1279 | break; |
| 1280 | } |
| 1281 | } |
| 1282 | |
| 1283 | static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) |
| 1284 | { |
| 1285 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) |
| 1286 | vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]); |
| 1287 | else |
| 1288 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); |
| 1289 | |
| 1290 | update_exception_bitmap(vcpu); |
| 1291 | } |
| 1292 | |
| 1293 | static __init int cpu_has_kvm_support(void) |
| 1294 | { |
| 1295 | return cpu_has_vmx(); |
| 1296 | } |
| 1297 | |
| 1298 | static __init int vmx_disabled_by_bios(void) |
| 1299 | { |
| 1300 | u64 msr; |
| 1301 | |
| 1302 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); |
	if (msr & FEATURE_CONTROL_LOCKED) {
		/* launched with tboot: VMXON inside SMX must be enabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
			&& tboot_enabled())
			return 1;
		/* launched without tboot: VMXON outside SMX must be enabled */
		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
			&& !tboot_enabled())
			return 1;
	}

	/* not locked, or locked with the relevant enable bit set */
	return 0;
| 1314 | } |
| 1315 | |
| 1316 | static void kvm_cpu_vmxon(u64 addr) |
| 1317 | { |
| 1318 | asm volatile (ASM_VMX_VMXON_RAX |
| 1319 | : : "a"(&addr), "m"(addr) |
| 1320 | : "memory", "cc"); |
| 1321 | } |
| 1322 | |
| 1323 | static int hardware_enable(void *garbage) |
| 1324 | { |
| 1325 | int cpu = raw_smp_processor_id(); |
| 1326 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
| 1327 | u64 old, test_bits; |
| 1328 | |
| 1329 | if (read_cr4() & X86_CR4_VMXE) |
| 1330 | return -EBUSY; |
| 1331 | |
| 1332 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); |
| 1333 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
| 1334 | |
| 1335 | test_bits = FEATURE_CONTROL_LOCKED; |
| 1336 | test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; |
| 1337 | if (tboot_enabled()) |
| 1338 | test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; |
| 1339 | |
| 1340 | if ((old & test_bits) != test_bits) { |
| 1341 | /* enable and lock */ |
| 1342 | wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); |
| 1343 | } |
| 1344 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ |
| 1345 | |
| 1346 | if (vmm_exclusive) { |
| 1347 | kvm_cpu_vmxon(phys_addr); |
| 1348 | ept_sync_global(); |
| 1349 | } |
| 1350 | |
| 1351 | store_gdt(&__get_cpu_var(host_gdt)); |
| 1352 | |
| 1353 | return 0; |
| 1354 | } |
| 1355 | |
| 1356 | static void vmclear_local_vcpus(void) |
| 1357 | { |
| 1358 | int cpu = raw_smp_processor_id(); |
| 1359 | struct vcpu_vmx *vmx, *n; |
| 1360 | |
| 1361 | list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu), |
| 1362 | local_vcpus_link) |
| 1363 | __vcpu_clear(vmx); |
| 1364 | } |
| 1365 | |
| 1366 | |
/*
 * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
 * tricks.
 */
| 1370 | static void kvm_cpu_vmxoff(void) |
| 1371 | { |
| 1372 | asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc"); |
| 1373 | } |
| 1374 | |
| 1375 | static void hardware_disable(void *garbage) |
| 1376 | { |
| 1377 | if (vmm_exclusive) { |
| 1378 | vmclear_local_vcpus(); |
| 1379 | kvm_cpu_vmxoff(); |
| 1380 | } |
| 1381 | write_cr4(read_cr4() & ~X86_CR4_VMXE); |
| 1382 | } |
| 1383 | |
| 1384 | static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, |
| 1385 | u32 msr, u32 *result) |
| 1386 | { |
| 1387 | u32 vmx_msr_low, vmx_msr_high; |
| 1388 | u32 ctl = ctl_min | ctl_opt; |
| 1389 | |
| 1390 | rdmsr(msr, vmx_msr_low, vmx_msr_high); |
| 1391 | |
| 1392 | ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */ |
| 1393 | ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */ |
| 1394 | |
| 1395 | /* Ensure minimum (required) set of control bits are supported. */ |
| 1396 | if (ctl_min & ~ctl) |
| 1397 | return -EIO; |
| 1398 | |
| 1399 | *result = ctl; |
| 1400 | return 0; |
| 1401 | } |
| 1402 | |
| 1403 | static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf) |
| 1404 | { |
| 1405 | u32 vmx_msr_low, vmx_msr_high; |
| 1406 | u32 min, opt, min2, opt2; |
| 1407 | u32 _pin_based_exec_control = 0; |
| 1408 | u32 _cpu_based_exec_control = 0; |
| 1409 | u32 _cpu_based_2nd_exec_control = 0; |
| 1410 | u32 _vmexit_control = 0; |
| 1411 | u32 _vmentry_control = 0; |
| 1412 | |
| 1413 | min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING; |
| 1414 | opt = PIN_BASED_VIRTUAL_NMIS; |
| 1415 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS, |
| 1416 | &_pin_based_exec_control) < 0) |
| 1417 | return -EIO; |
| 1418 | |
| 1419 | min = CPU_BASED_HLT_EXITING | |
| 1420 | #ifdef CONFIG_X86_64 |
| 1421 | CPU_BASED_CR8_LOAD_EXITING | |
| 1422 | CPU_BASED_CR8_STORE_EXITING | |
| 1423 | #endif |
| 1424 | CPU_BASED_CR3_LOAD_EXITING | |
| 1425 | CPU_BASED_CR3_STORE_EXITING | |
| 1426 | CPU_BASED_USE_IO_BITMAPS | |
| 1427 | CPU_BASED_MOV_DR_EXITING | |
| 1428 | CPU_BASED_USE_TSC_OFFSETING | |
| 1429 | CPU_BASED_MWAIT_EXITING | |
| 1430 | CPU_BASED_MONITOR_EXITING | |
| 1431 | CPU_BASED_INVLPG_EXITING; |
| 1432 | opt = CPU_BASED_TPR_SHADOW | |
| 1433 | CPU_BASED_USE_MSR_BITMAPS | |
| 1434 | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS; |
| 1435 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS, |
| 1436 | &_cpu_based_exec_control) < 0) |
| 1437 | return -EIO; |
| 1438 | #ifdef CONFIG_X86_64 |
| 1439 | if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)) |
| 1440 | _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING & |
| 1441 | ~CPU_BASED_CR8_STORE_EXITING; |
| 1442 | #endif |
| 1443 | if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) { |
| 1444 | min2 = 0; |
| 1445 | opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | |
| 1446 | SECONDARY_EXEC_WBINVD_EXITING | |
| 1447 | SECONDARY_EXEC_ENABLE_VPID | |
| 1448 | SECONDARY_EXEC_ENABLE_EPT | |
| 1449 | SECONDARY_EXEC_UNRESTRICTED_GUEST | |
| 1450 | SECONDARY_EXEC_PAUSE_LOOP_EXITING | |
| 1451 | SECONDARY_EXEC_RDTSCP; |
| 1452 | if (adjust_vmx_controls(min2, opt2, |
| 1453 | MSR_IA32_VMX_PROCBASED_CTLS2, |
| 1454 | &_cpu_based_2nd_exec_control) < 0) |
| 1455 | return -EIO; |
| 1456 | } |
| 1457 | #ifndef CONFIG_X86_64 |
| 1458 | if (!(_cpu_based_2nd_exec_control & |
| 1459 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) |
| 1460 | _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW; |
| 1461 | #endif |
| 1462 | if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) { |
		/*
		 * CR3 accesses and invlpg don't need to cause VM exits when
		 * EPT is enabled.
		 */
| 1465 | _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING | |
| 1466 | CPU_BASED_CR3_STORE_EXITING | |
| 1467 | CPU_BASED_INVLPG_EXITING); |
| 1468 | rdmsr(MSR_IA32_VMX_EPT_VPID_CAP, |
| 1469 | vmx_capability.ept, vmx_capability.vpid); |
| 1470 | } |
| 1471 | |
| 1472 | min = 0; |
| 1473 | #ifdef CONFIG_X86_64 |
| 1474 | min |= VM_EXIT_HOST_ADDR_SPACE_SIZE; |
| 1475 | #endif |
| 1476 | opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT; |
| 1477 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS, |
| 1478 | &_vmexit_control) < 0) |
| 1479 | return -EIO; |
| 1480 | |
| 1481 | min = 0; |
| 1482 | opt = VM_ENTRY_LOAD_IA32_PAT; |
| 1483 | if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS, |
| 1484 | &_vmentry_control) < 0) |
| 1485 | return -EIO; |
| 1486 | |
| 1487 | rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high); |
| 1488 | |
| 1489 | /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ |
| 1490 | if ((vmx_msr_high & 0x1fff) > PAGE_SIZE) |
| 1491 | return -EIO; |
| 1492 | |
| 1493 | #ifdef CONFIG_X86_64 |
| 1494 | /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ |
| 1495 | if (vmx_msr_high & (1u<<16)) |
| 1496 | return -EIO; |
| 1497 | #endif |
| 1498 | |
| 1499 | /* Require Write-Back (WB) memory type for VMCS accesses. */ |
| 1500 | if (((vmx_msr_high >> 18) & 15) != 6) |
| 1501 | return -EIO; |
| 1502 | |
| 1503 | vmcs_conf->size = vmx_msr_high & 0x1fff; |
	vmcs_conf->order = get_order(vmcs_conf->size);
| 1505 | vmcs_conf->revision_id = vmx_msr_low; |
| 1506 | |
| 1507 | vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; |
| 1508 | vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; |
| 1509 | vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; |
| 1510 | vmcs_conf->vmexit_ctrl = _vmexit_control; |
| 1511 | vmcs_conf->vmentry_ctrl = _vmentry_control; |
| 1512 | |
| 1513 | return 0; |
| 1514 | } |
| 1515 | |
| 1516 | static struct vmcs *alloc_vmcs_cpu(int cpu) |
| 1517 | { |
| 1518 | int node = cpu_to_node(cpu); |
| 1519 | struct page *pages; |
| 1520 | struct vmcs *vmcs; |
| 1521 | |
| 1522 | pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order); |
| 1523 | if (!pages) |
| 1524 | return NULL; |
| 1525 | vmcs = page_address(pages); |
| 1526 | memset(vmcs, 0, vmcs_config.size); |
| 1527 | vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */ |
| 1528 | return vmcs; |
| 1529 | } |
| 1530 | |
| 1531 | static struct vmcs *alloc_vmcs(void) |
| 1532 | { |
| 1533 | return alloc_vmcs_cpu(raw_smp_processor_id()); |
| 1534 | } |
| 1535 | |
| 1536 | static void free_vmcs(struct vmcs *vmcs) |
| 1537 | { |
| 1538 | free_pages((unsigned long)vmcs, vmcs_config.order); |
| 1539 | } |
| 1540 | |
| 1541 | static void free_kvm_area(void) |
| 1542 | { |
| 1543 | int cpu; |
| 1544 | |
| 1545 | for_each_possible_cpu(cpu) { |
| 1546 | free_vmcs(per_cpu(vmxarea, cpu)); |
| 1547 | per_cpu(vmxarea, cpu) = NULL; |
| 1548 | } |
| 1549 | } |
| 1550 | |
| 1551 | static __init int alloc_kvm_area(void) |
| 1552 | { |
| 1553 | int cpu; |
| 1554 | |
| 1555 | for_each_possible_cpu(cpu) { |
| 1556 | struct vmcs *vmcs; |
| 1557 | |
| 1558 | vmcs = alloc_vmcs_cpu(cpu); |
| 1559 | if (!vmcs) { |
| 1560 | free_kvm_area(); |
| 1561 | return -ENOMEM; |
| 1562 | } |
| 1563 | |
| 1564 | per_cpu(vmxarea, cpu) = vmcs; |
| 1565 | } |
| 1566 | return 0; |
| 1567 | } |
| 1568 | |
| 1569 | static __init int hardware_setup(void) |
| 1570 | { |
| 1571 | if (setup_vmcs_config(&vmcs_config) < 0) |
| 1572 | return -EIO; |
| 1573 | |
| 1574 | if (boot_cpu_has(X86_FEATURE_NX)) |
| 1575 | kvm_enable_efer_bits(EFER_NX); |
| 1576 | |
| 1577 | if (!cpu_has_vmx_vpid()) |
| 1578 | enable_vpid = 0; |
| 1579 | |
| 1580 | if (!cpu_has_vmx_ept() || |
| 1581 | !cpu_has_vmx_ept_4levels()) { |
| 1582 | enable_ept = 0; |
| 1583 | enable_unrestricted_guest = 0; |
| 1584 | } |
| 1585 | |
| 1586 | if (!cpu_has_vmx_unrestricted_guest()) |
| 1587 | enable_unrestricted_guest = 0; |
| 1588 | |
| 1589 | if (!cpu_has_vmx_flexpriority()) |
| 1590 | flexpriority_enabled = 0; |
| 1591 | |
| 1592 | if (!cpu_has_vmx_tpr_shadow()) |
| 1593 | kvm_x86_ops->update_cr8_intercept = NULL; |
| 1594 | |
| 1595 | if (enable_ept && !cpu_has_vmx_ept_2m_page()) |
| 1596 | kvm_disable_largepages(); |
| 1597 | |
| 1598 | if (!cpu_has_vmx_ple()) |
| 1599 | ple_gap = 0; |
| 1600 | |
| 1601 | return alloc_kvm_area(); |
| 1602 | } |
| 1603 | |
| 1604 | static __exit void hardware_unsetup(void) |
| 1605 | { |
| 1606 | free_kvm_area(); |
| 1607 | } |
| 1608 | |
| 1609 | static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save) |
| 1610 | { |
| 1611 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 1612 | |
	if (vmcs_readl(sf->base) == save->base && (save->ar & AR_S_MASK)) {
| 1614 | vmcs_write16(sf->selector, save->selector); |
| 1615 | vmcs_writel(sf->base, save->base); |
| 1616 | vmcs_write32(sf->limit, save->limit); |
| 1617 | vmcs_write32(sf->ar_bytes, save->ar); |
| 1618 | } else { |
| 1619 | u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK) |
| 1620 | << AR_DPL_SHIFT; |
| 1621 | vmcs_write32(sf->ar_bytes, 0x93 | dpl); |
| 1622 | } |
| 1623 | } |
| 1624 | |
| 1625 | static void enter_pmode(struct kvm_vcpu *vcpu) |
| 1626 | { |
| 1627 | unsigned long flags; |
| 1628 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1629 | |
| 1630 | vmx->emulation_required = 1; |
| 1631 | vmx->rmode.vm86_active = 0; |
| 1632 | |
| 1633 | vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base); |
| 1634 | vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit); |
| 1635 | vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar); |
| 1636 | |
| 1637 | flags = vmcs_readl(GUEST_RFLAGS); |
| 1638 | flags &= RMODE_GUEST_OWNED_EFLAGS_BITS; |
| 1639 | flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; |
| 1640 | vmcs_writel(GUEST_RFLAGS, flags); |
| 1641 | |
| 1642 | vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) | |
| 1643 | (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME)); |
| 1644 | |
| 1645 | update_exception_bitmap(vcpu); |
| 1646 | |
| 1647 | if (emulate_invalid_guest_state) |
| 1648 | return; |
| 1649 | |
| 1650 | fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es); |
| 1651 | fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds); |
| 1652 | fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs); |
| 1653 | fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs); |
| 1654 | |
| 1655 | vmcs_write16(GUEST_SS_SELECTOR, 0); |
| 1656 | vmcs_write32(GUEST_SS_AR_BYTES, 0x93); |
| 1657 | |
| 1658 | vmcs_write16(GUEST_CS_SELECTOR, |
| 1659 | vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK); |
| 1660 | vmcs_write32(GUEST_CS_AR_BYTES, 0x9b); |
| 1661 | } |
| 1662 | |
| 1663 | static gva_t rmode_tss_base(struct kvm *kvm) |
| 1664 | { |
| 1665 | if (!kvm->arch.tss_addr) { |
| 1666 | struct kvm_memslots *slots; |
| 1667 | gfn_t base_gfn; |
| 1668 | |
| 1669 | slots = kvm_memslots(kvm); |
		base_gfn = slots->memslots[0].base_gfn +
				slots->memslots[0].npages - 3;
| 1672 | return base_gfn << PAGE_SHIFT; |
| 1673 | } |
| 1674 | return kvm->arch.tss_addr; |
| 1675 | } |
| 1676 | |
| 1677 | static void fix_rmode_seg(int seg, struct kvm_save_segment *save) |
| 1678 | { |
| 1679 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 1680 | |
| 1681 | save->selector = vmcs_read16(sf->selector); |
| 1682 | save->base = vmcs_readl(sf->base); |
| 1683 | save->limit = vmcs_read32(sf->limit); |
| 1684 | save->ar = vmcs_read32(sf->ar_bytes); |
| 1685 | vmcs_write16(sf->selector, save->base >> 4); |
	vmcs_write32(sf->base, save->base & 0xffff0);
| 1687 | vmcs_write32(sf->limit, 0xffff); |
| 1688 | vmcs_write32(sf->ar_bytes, 0xf3); |
| 1689 | } |
| 1690 | |
| 1691 | static void enter_rmode(struct kvm_vcpu *vcpu) |
| 1692 | { |
| 1693 | unsigned long flags; |
| 1694 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1695 | |
| 1696 | if (enable_unrestricted_guest) |
| 1697 | return; |
| 1698 | |
| 1699 | vmx->emulation_required = 1; |
| 1700 | vmx->rmode.vm86_active = 1; |
| 1701 | |
| 1702 | vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE); |
| 1703 | vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm)); |
| 1704 | |
| 1705 | vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT); |
| 1706 | vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); |
| 1707 | |
| 1708 | vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES); |
| 1709 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
| 1710 | |
| 1711 | flags = vmcs_readl(GUEST_RFLAGS); |
| 1712 | vmx->rmode.save_rflags = flags; |
| 1713 | |
| 1714 | flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM; |
| 1715 | |
| 1716 | vmcs_writel(GUEST_RFLAGS, flags); |
| 1717 | vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME); |
| 1718 | update_exception_bitmap(vcpu); |
| 1719 | |
| 1720 | if (emulate_invalid_guest_state) |
| 1721 | goto continue_rmode; |
| 1722 | |
| 1723 | vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4); |
| 1724 | vmcs_write32(GUEST_SS_LIMIT, 0xffff); |
| 1725 | vmcs_write32(GUEST_SS_AR_BYTES, 0xf3); |
| 1726 | |
| 1727 | vmcs_write32(GUEST_CS_AR_BYTES, 0xf3); |
| 1728 | vmcs_write32(GUEST_CS_LIMIT, 0xffff); |
| 1729 | if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000) |
| 1730 | vmcs_writel(GUEST_CS_BASE, 0xf0000); |
| 1731 | vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4); |
| 1732 | |
| 1733 | fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es); |
| 1734 | fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds); |
| 1735 | fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs); |
| 1736 | fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs); |
| 1737 | |
| 1738 | continue_rmode: |
| 1739 | kvm_mmu_reset_context(vcpu); |
| 1740 | init_rmode(vcpu->kvm); |
| 1741 | } |
| 1742 | |
| 1743 | static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer) |
| 1744 | { |
| 1745 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1746 | struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); |
| 1747 | |
| 1748 | if (!msr) |
| 1749 | return; |
| 1750 | |
| 1751 | /* |
| 1752 | * Force kernel_gs_base reloading before EFER changes, as control |
| 1753 | * of this msr depends on is_long_mode(). |
| 1754 | */ |
| 1755 | vmx_load_host_state(to_vmx(vcpu)); |
| 1756 | vcpu->arch.efer = efer; |
| 1757 | if (efer & EFER_LMA) { |
| 1758 | vmcs_write32(VM_ENTRY_CONTROLS, |
| 1759 | vmcs_read32(VM_ENTRY_CONTROLS) | |
| 1760 | VM_ENTRY_IA32E_MODE); |
| 1761 | msr->data = efer; |
| 1762 | } else { |
| 1763 | vmcs_write32(VM_ENTRY_CONTROLS, |
| 1764 | vmcs_read32(VM_ENTRY_CONTROLS) & |
| 1765 | ~VM_ENTRY_IA32E_MODE); |
| 1766 | |
| 1767 | msr->data = efer & ~EFER_LME; |
| 1768 | } |
| 1769 | setup_msrs(vmx); |
| 1770 | } |
| 1771 | |
| 1772 | #ifdef CONFIG_X86_64 |
| 1773 | |
| 1774 | static void enter_lmode(struct kvm_vcpu *vcpu) |
| 1775 | { |
| 1776 | u32 guest_tr_ar; |
| 1777 | |
| 1778 | guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES); |
| 1779 | if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { |
		printk(KERN_DEBUG "%s: tss fixup for long mode\n",
		       __func__);
| 1782 | vmcs_write32(GUEST_TR_AR_BYTES, |
| 1783 | (guest_tr_ar & ~AR_TYPE_MASK) |
| 1784 | | AR_TYPE_BUSY_64_TSS); |
| 1785 | } |
| 1786 | vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); |
| 1787 | } |
| 1788 | |
| 1789 | static void exit_lmode(struct kvm_vcpu *vcpu) |
| 1790 | { |
| 1791 | vmcs_write32(VM_ENTRY_CONTROLS, |
| 1792 | vmcs_read32(VM_ENTRY_CONTROLS) |
| 1793 | & ~VM_ENTRY_IA32E_MODE); |
| 1794 | vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); |
| 1795 | } |
| 1796 | |
| 1797 | #endif |
| 1798 | |
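/*
 * Flush the guest TLB: invalidate this vcpu's VPID tag, and with EPT also
 * invalidate the mappings tagged by the current EPT pointer (there is
 * nothing to flush before the first root has been allocated).
 */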
| 1799 | static void vmx_flush_tlb(struct kvm_vcpu *vcpu) |
| 1800 | { |
| 1801 | vpid_sync_context(to_vmx(vcpu)); |
| 1802 | if (enable_ept) { |
| 1803 | if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) |
| 1804 | return; |
| 1805 | ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa)); |
| 1806 | } |
| 1807 | } |
| 1808 | |
| 1809 | static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) |
| 1810 | { |
| 1811 | ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; |
| 1812 | |
| 1813 | vcpu->arch.cr0 &= ~cr0_guest_owned_bits; |
| 1814 | vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits; |
| 1815 | } |
| 1816 | |
| 1817 | static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu) |
| 1818 | { |
| 1819 | ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; |
| 1820 | |
| 1821 | vcpu->arch.cr4 &= ~cr4_guest_owned_bits; |
| 1822 | vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits; |
| 1823 | } |
| 1824 | |
| 1825 | static void ept_load_pdptrs(struct kvm_vcpu *vcpu) |
| 1826 | { |
| 1827 | if (!test_bit(VCPU_EXREG_PDPTR, |
| 1828 | (unsigned long *)&vcpu->arch.regs_dirty)) |
| 1829 | return; |
| 1830 | |
| 1831 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { |
| 1832 | vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]); |
| 1833 | vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]); |
| 1834 | vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]); |
| 1835 | vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]); |
| 1836 | } |
| 1837 | } |
| 1838 | |
| 1839 | static void ept_save_pdptrs(struct kvm_vcpu *vcpu) |
| 1840 | { |
| 1841 | if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { |
| 1842 | vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); |
| 1843 | vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); |
| 1844 | vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); |
| 1845 | vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); |
| 1846 | } |
| 1847 | |
| 1848 | __set_bit(VCPU_EXREG_PDPTR, |
| 1849 | (unsigned long *)&vcpu->arch.regs_avail); |
| 1850 | __set_bit(VCPU_EXREG_PDPTR, |
| 1851 | (unsigned long *)&vcpu->arch.regs_dirty); |
| 1852 | } |
| 1853 | |
| 1854 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
| 1855 | |
| 1856 | static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, |
| 1857 | unsigned long cr0, |
| 1858 | struct kvm_vcpu *vcpu) |
| 1859 | { |
| 1860 | if (!(cr0 & X86_CR0_PG)) { |
| 1861 | /* From paging/starting to nonpaging */ |
| 1862 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, |
| 1863 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) | |
| 1864 | (CPU_BASED_CR3_LOAD_EXITING | |
| 1865 | CPU_BASED_CR3_STORE_EXITING)); |
| 1866 | vcpu->arch.cr0 = cr0; |
| 1867 | vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
| 1868 | } else if (!is_paging(vcpu)) { |
| 1869 | /* From nonpaging to paging */ |
| 1870 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, |
| 1871 | vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) & |
| 1872 | ~(CPU_BASED_CR3_LOAD_EXITING | |
| 1873 | CPU_BASED_CR3_STORE_EXITING)); |
| 1874 | vcpu->arch.cr0 = cr0; |
| 1875 | vmx_set_cr4(vcpu, kvm_read_cr4(vcpu)); |
| 1876 | } |
| 1877 | |
| 1878 | if (!(cr0 & X86_CR0_WP)) |
| 1879 | *hw_cr0 &= ~X86_CR0_WP; |
| 1880 | } |
| 1881 | |
| 1882 | static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
| 1883 | { |
| 1884 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1885 | unsigned long hw_cr0; |
| 1886 | |
| 1887 | if (enable_unrestricted_guest) |
| 1888 | hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST) |
| 1889 | | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; |
| 1890 | else |
| 1891 | hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON; |
| 1892 | |
| 1893 | if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) |
| 1894 | enter_pmode(vcpu); |
| 1895 | |
| 1896 | if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) |
| 1897 | enter_rmode(vcpu); |
| 1898 | |
| 1899 | #ifdef CONFIG_X86_64 |
| 1900 | if (vcpu->arch.efer & EFER_LME) { |
| 1901 | if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) |
| 1902 | enter_lmode(vcpu); |
| 1903 | if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) |
| 1904 | exit_lmode(vcpu); |
| 1905 | } |
| 1906 | #endif |
| 1907 | |
| 1908 | if (enable_ept) |
| 1909 | ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu); |
| 1910 | |
| 1911 | if (!vcpu->fpu_active) |
| 1912 | hw_cr0 |= X86_CR0_TS | X86_CR0_MP; |
| 1913 | |
| 1914 | vmcs_writel(CR0_READ_SHADOW, cr0); |
| 1915 | vmcs_writel(GUEST_CR0, hw_cr0); |
| 1916 | vcpu->arch.cr0 = cr0; |
| 1917 | } |
| 1918 | |
| 1919 | static u64 construct_eptp(unsigned long root_hpa) |
| 1920 | { |
| 1921 | u64 eptp; |
| 1922 | |
	/*
	 * TODO: read the memory type and page-walk length from the VMX
	 * capability MSR instead of using the defaults.
	 */
| 1924 | eptp = VMX_EPT_DEFAULT_MT | |
| 1925 | VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT; |
| 1926 | eptp |= (root_hpa & PAGE_MASK); |
| 1927 | |
| 1928 | return eptp; |
| 1929 | } |
| 1930 | |
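/*
 * With EPT enabled, a guest CR3 write loads the EPT pointer instead; the
 * CR3 the hardware sees is the guest's own value while the guest is
 * paging, or the identity-map pagetable address while it is not.
 */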
| 1931 | static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) |
| 1932 | { |
| 1933 | unsigned long guest_cr3; |
| 1934 | u64 eptp; |
| 1935 | |
| 1936 | guest_cr3 = cr3; |
| 1937 | if (enable_ept) { |
| 1938 | eptp = construct_eptp(cr3); |
| 1939 | vmcs_write64(EPT_POINTER, eptp); |
| 1940 | guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 : |
| 1941 | vcpu->kvm->arch.ept_identity_map_addr; |
| 1942 | ept_load_pdptrs(vcpu); |
| 1943 | } |
| 1944 | |
| 1945 | vmx_flush_tlb(vcpu); |
| 1946 | vmcs_writel(GUEST_CR3, guest_cr3); |
| 1947 | } |
| 1948 | |
| 1949 | static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
| 1950 | { |
| 1951 | unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? |
| 1952 | KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); |
| 1953 | |
| 1954 | vcpu->arch.cr4 = cr4; |
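	/*
	 * With EPT the hardware walks the guest page tables itself, so the
	 * hardware CR4 must reflect the guest's true paging mode: force
	 * plain 32-bit (PSE) paging while the guest has paging disabled,
	 * and clear PAE whenever the guest does not request it.
	 */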
| 1955 | if (enable_ept) { |
| 1956 | if (!is_paging(vcpu)) { |
| 1957 | hw_cr4 &= ~X86_CR4_PAE; |
| 1958 | hw_cr4 |= X86_CR4_PSE; |
| 1959 | } else if (!(cr4 & X86_CR4_PAE)) { |
| 1960 | hw_cr4 &= ~X86_CR4_PAE; |
| 1961 | } |
| 1962 | } |
| 1963 | |
| 1964 | vmcs_writel(CR4_READ_SHADOW, cr4); |
| 1965 | vmcs_writel(GUEST_CR4, hw_cr4); |
| 1966 | } |
| 1967 | |
| 1968 | static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) |
| 1969 | { |
| 1970 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 1971 | |
| 1972 | return vmcs_readl(sf->base); |
| 1973 | } |
| 1974 | |
| 1975 | static void vmx_get_segment(struct kvm_vcpu *vcpu, |
| 1976 | struct kvm_segment *var, int seg) |
| 1977 | { |
| 1978 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 1979 | u32 ar; |
| 1980 | |
| 1981 | var->base = vmcs_readl(sf->base); |
| 1982 | var->limit = vmcs_read32(sf->limit); |
| 1983 | var->selector = vmcs_read16(sf->selector); |
| 1984 | ar = vmcs_read32(sf->ar_bytes); |
| 1985 | if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state) |
| 1986 | ar = 0; |
| 1987 | var->type = ar & 15; |
| 1988 | var->s = (ar >> 4) & 1; |
| 1989 | var->dpl = (ar >> 5) & 3; |
| 1990 | var->present = (ar >> 7) & 1; |
| 1991 | var->avl = (ar >> 12) & 1; |
| 1992 | var->l = (ar >> 13) & 1; |
| 1993 | var->db = (ar >> 14) & 1; |
| 1994 | var->g = (ar >> 15) & 1; |
| 1995 | var->unusable = (ar >> 16) & 1; |
| 1996 | } |
| 1997 | |
| 1998 | static int vmx_get_cpl(struct kvm_vcpu *vcpu) |
| 1999 | { |
| 2000 | if (!is_protmode(vcpu)) |
| 2001 | return 0; |
| 2002 | |
| 2003 | if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */ |
| 2004 | return 3; |
| 2005 | |
| 2006 | return vmcs_read16(GUEST_CS_SELECTOR) & 3; |
| 2007 | } |
| 2008 | |
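/*
 * Pack a struct kvm_segment's flags into the VMX access-rights format:
 * type in bits 0-3, then S, DPL and P, AVL/L/DB/G in bits 12-15, and
 * bit 16 marking the segment unusable.
 */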
| 2009 | static u32 vmx_segment_access_rights(struct kvm_segment *var) |
| 2010 | { |
| 2011 | u32 ar; |
| 2012 | |
| 2013 | if (var->unusable) |
| 2014 | ar = 1 << 16; |
| 2015 | else { |
| 2016 | ar = var->type & 15; |
| 2017 | ar |= (var->s & 1) << 4; |
| 2018 | ar |= (var->dpl & 3) << 5; |
| 2019 | ar |= (var->present & 1) << 7; |
| 2020 | ar |= (var->avl & 1) << 12; |
| 2021 | ar |= (var->l & 1) << 13; |
| 2022 | ar |= (var->db & 1) << 14; |
| 2023 | ar |= (var->g & 1) << 15; |
| 2024 | } |
| 2025 | if (ar == 0) /* a 0 value means unusable */ |
| 2026 | ar = AR_UNUSABLE_MASK; |
| 2027 | |
| 2028 | return ar; |
| 2029 | } |
| 2030 | |
| 2031 | static void vmx_set_segment(struct kvm_vcpu *vcpu, |
| 2032 | struct kvm_segment *var, int seg) |
| 2033 | { |
| 2034 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2035 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 2036 | u32 ar; |
| 2037 | |
| 2038 | if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) { |
| 2039 | vmx->rmode.tr.selector = var->selector; |
| 2040 | vmx->rmode.tr.base = var->base; |
| 2041 | vmx->rmode.tr.limit = var->limit; |
| 2042 | vmx->rmode.tr.ar = vmx_segment_access_rights(var); |
| 2043 | return; |
| 2044 | } |
| 2045 | vmcs_writel(sf->base, var->base); |
| 2046 | vmcs_write32(sf->limit, var->limit); |
| 2047 | vmcs_write16(sf->selector, var->selector); |
| 2048 | if (vmx->rmode.vm86_active && var->s) { |
| 2049 | /* |
| 2050 | * Hack real-mode segments into vm86 compatibility. |
| 2051 | */ |
| 2052 | if (var->base == 0xffff0000 && var->selector == 0xf000) |
| 2053 | vmcs_writel(sf->base, 0xf0000); |
| 2054 | ar = 0xf3; |
| 2055 | } else |
| 2056 | ar = vmx_segment_access_rights(var); |
| 2057 | |
| 2058 | /* |
| 2059 | * Fix the "Accessed" bit in AR field of segment registers for older |
| 2060 | * qemu binaries. |
| 2061 | * IA32 arch specifies that at the time of processor reset the |
| 2062 | * "Accessed" bit in the AR field of segment registers is 1. And qemu |
| 2063 | * is setting it to 0 in the usedland code. This causes invalid guest |
| 2064 | * state vmexit when "unrestricted guest" mode is turned on. |
| 2065 | * Fix for this setup issue in cpu_reset is being pushed in the qemu |
| 2066 | * tree. Newer qemu binaries with that qemu fix would not need this |
| 2067 | * kvm hack. |
| 2068 | */ |
| 2069 | if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR)) |
| 2070 | ar |= 0x1; /* Accessed */ |
| 2071 | |
| 2072 | vmcs_write32(sf->ar_bytes, ar); |
| 2073 | } |
| 2074 | |
| 2075 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) |
| 2076 | { |
| 2077 | u32 ar = vmcs_read32(GUEST_CS_AR_BYTES); |
| 2078 | |
| 2079 | *db = (ar >> 14) & 1; |
| 2080 | *l = (ar >> 13) & 1; |
| 2081 | } |
| 2082 | |
| 2083 | static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 2084 | { |
| 2085 | dt->size = vmcs_read32(GUEST_IDTR_LIMIT); |
| 2086 | dt->address = vmcs_readl(GUEST_IDTR_BASE); |
| 2087 | } |
| 2088 | |
| 2089 | static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 2090 | { |
| 2091 | vmcs_write32(GUEST_IDTR_LIMIT, dt->size); |
| 2092 | vmcs_writel(GUEST_IDTR_BASE, dt->address); |
| 2093 | } |
| 2094 | |
| 2095 | static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 2096 | { |
| 2097 | dt->size = vmcs_read32(GUEST_GDTR_LIMIT); |
| 2098 | dt->address = vmcs_readl(GUEST_GDTR_BASE); |
| 2099 | } |
| 2100 | |
| 2101 | static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) |
| 2102 | { |
| 2103 | vmcs_write32(GUEST_GDTR_LIMIT, dt->size); |
| 2104 | vmcs_writel(GUEST_GDTR_BASE, dt->address); |
| 2105 | } |
| 2106 | |
| 2107 | static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg) |
| 2108 | { |
| 2109 | struct kvm_segment var; |
| 2110 | u32 ar; |
| 2111 | |
| 2112 | vmx_get_segment(vcpu, &var, seg); |
| 2113 | ar = vmx_segment_access_rights(&var); |
| 2114 | |
| 2115 | if (var.base != (var.selector << 4)) |
| 2116 | return false; |
| 2117 | if (var.limit != 0xffff) |
| 2118 | return false; |
| 2119 | if (ar != 0xf3) |
| 2120 | return false; |
| 2121 | |
| 2122 | return true; |
| 2123 | } |
| 2124 | |
| 2125 | static bool code_segment_valid(struct kvm_vcpu *vcpu) |
| 2126 | { |
| 2127 | struct kvm_segment cs; |
| 2128 | unsigned int cs_rpl; |
| 2129 | |
| 2130 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
| 2131 | cs_rpl = cs.selector & SELECTOR_RPL_MASK; |
| 2132 | |
| 2133 | if (cs.unusable) |
| 2134 | return false; |
| 2135 | if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK)) |
| 2136 | return false; |
| 2137 | if (!cs.s) |
| 2138 | return false; |
| 2139 | if (cs.type & AR_TYPE_WRITEABLE_MASK) { |
| 2140 | if (cs.dpl > cs_rpl) |
| 2141 | return false; |
| 2142 | } else { |
| 2143 | if (cs.dpl != cs_rpl) |
| 2144 | return false; |
| 2145 | } |
| 2146 | if (!cs.present) |
| 2147 | return false; |
| 2148 | |
	/*
	 * TODO: Add a Reserved-field check; this will require a new member
	 * in the kvm_segment_field structure.
	 */
| 2150 | return true; |
| 2151 | } |
| 2152 | |
| 2153 | static bool stack_segment_valid(struct kvm_vcpu *vcpu) |
| 2154 | { |
| 2155 | struct kvm_segment ss; |
| 2156 | unsigned int ss_rpl; |
| 2157 | |
| 2158 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
| 2159 | ss_rpl = ss.selector & SELECTOR_RPL_MASK; |
| 2160 | |
| 2161 | if (ss.unusable) |
| 2162 | return true; |
| 2163 | if (ss.type != 3 && ss.type != 7) |
| 2164 | return false; |
| 2165 | if (!ss.s) |
| 2166 | return false; |
| 2167 | if (ss.dpl != ss_rpl) /* DPL != RPL */ |
| 2168 | return false; |
| 2169 | if (!ss.present) |
| 2170 | return false; |
| 2171 | |
| 2172 | return true; |
| 2173 | } |
| 2174 | |
| 2175 | static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg) |
| 2176 | { |
| 2177 | struct kvm_segment var; |
| 2178 | unsigned int rpl; |
| 2179 | |
| 2180 | vmx_get_segment(vcpu, &var, seg); |
| 2181 | rpl = var.selector & SELECTOR_RPL_MASK; |
| 2182 | |
| 2183 | if (var.unusable) |
| 2184 | return true; |
| 2185 | if (!var.s) |
| 2186 | return false; |
| 2187 | if (!var.present) |
| 2188 | return false; |
| 2189 | if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) { |
| 2190 | if (var.dpl < rpl) /* DPL < RPL */ |
| 2191 | return false; |
| 2192 | } |
| 2193 | |
	/*
	 * TODO: Add other members to kvm_segment_field to allow checking
	 * for other access rights flags.
	 */
| 2197 | return true; |
| 2198 | } |
| 2199 | |
| 2200 | static bool tr_valid(struct kvm_vcpu *vcpu) |
| 2201 | { |
| 2202 | struct kvm_segment tr; |
| 2203 | |
| 2204 | vmx_get_segment(vcpu, &tr, VCPU_SREG_TR); |
| 2205 | |
| 2206 | if (tr.unusable) |
| 2207 | return false; |
| 2208 | if (tr.selector & SELECTOR_TI_MASK) /* TI = 1 */ |
| 2209 | return false; |
| 2210 | if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */ |
| 2211 | return false; |
| 2212 | if (!tr.present) |
| 2213 | return false; |
| 2214 | |
| 2215 | return true; |
| 2216 | } |
| 2217 | |
| 2218 | static bool ldtr_valid(struct kvm_vcpu *vcpu) |
| 2219 | { |
| 2220 | struct kvm_segment ldtr; |
| 2221 | |
| 2222 | vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR); |
| 2223 | |
| 2224 | if (ldtr.unusable) |
| 2225 | return true; |
| 2226 | if (ldtr.selector & SELECTOR_TI_MASK) /* TI = 1 */ |
| 2227 | return false; |
| 2228 | if (ldtr.type != 2) |
| 2229 | return false; |
| 2230 | if (!ldtr.present) |
| 2231 | return false; |
| 2232 | |
| 2233 | return true; |
| 2234 | } |
| 2235 | |
| 2236 | static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu) |
| 2237 | { |
| 2238 | struct kvm_segment cs, ss; |
| 2239 | |
| 2240 | vmx_get_segment(vcpu, &cs, VCPU_SREG_CS); |
| 2241 | vmx_get_segment(vcpu, &ss, VCPU_SREG_SS); |
| 2242 | |
| 2243 | return ((cs.selector & SELECTOR_RPL_MASK) == |
| 2244 | (ss.selector & SELECTOR_RPL_MASK)); |
| 2245 | } |
| 2246 | |
| 2247 | /* |
| 2248 | * Check if guest state is valid. Returns true if valid, false if |
| 2249 | * not. |
| 2250 | * We assume that registers are always usable |
| 2251 | */ |
| 2252 | static bool guest_state_valid(struct kvm_vcpu *vcpu) |
| 2253 | { |
| 2254 | /* real mode guest state checks */ |
| 2255 | if (!is_protmode(vcpu)) { |
| 2256 | if (!rmode_segment_valid(vcpu, VCPU_SREG_CS)) |
| 2257 | return false; |
| 2258 | if (!rmode_segment_valid(vcpu, VCPU_SREG_SS)) |
| 2259 | return false; |
| 2260 | if (!rmode_segment_valid(vcpu, VCPU_SREG_DS)) |
| 2261 | return false; |
| 2262 | if (!rmode_segment_valid(vcpu, VCPU_SREG_ES)) |
| 2263 | return false; |
| 2264 | if (!rmode_segment_valid(vcpu, VCPU_SREG_FS)) |
| 2265 | return false; |
| 2266 | if (!rmode_segment_valid(vcpu, VCPU_SREG_GS)) |
| 2267 | return false; |
| 2268 | } else { |
| 2269 | /* protected mode guest state checks */ |
| 2270 | if (!cs_ss_rpl_check(vcpu)) |
| 2271 | return false; |
| 2272 | if (!code_segment_valid(vcpu)) |
| 2273 | return false; |
| 2274 | if (!stack_segment_valid(vcpu)) |
| 2275 | return false; |
| 2276 | if (!data_segment_valid(vcpu, VCPU_SREG_DS)) |
| 2277 | return false; |
| 2278 | if (!data_segment_valid(vcpu, VCPU_SREG_ES)) |
| 2279 | return false; |
| 2280 | if (!data_segment_valid(vcpu, VCPU_SREG_FS)) |
| 2281 | return false; |
| 2282 | if (!data_segment_valid(vcpu, VCPU_SREG_GS)) |
| 2283 | return false; |
| 2284 | if (!tr_valid(vcpu)) |
| 2285 | return false; |
| 2286 | if (!ldtr_valid(vcpu)) |
| 2287 | return false; |
| 2288 | } |
| 2289 | /* TODO: |
| 2290 | * - Add checks on RIP |
| 2291 | * - Add checks on RFLAGS |
| 2292 | */ |
| 2293 | |
| 2294 | return true; |
| 2295 | } |
| 2296 | |
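/*
 * Lay out the three-page vm86 TSS: clear all three pages, point the I/O
 * bitmap base past the interrupt-redirection map, and write the 0xff
 * terminator byte at the very end of the I/O bitmap.
 */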
| 2297 | static int init_rmode_tss(struct kvm *kvm) |
| 2298 | { |
| 2299 | gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT; |
| 2300 | u16 data = 0; |
| 2301 | int ret = 0; |
| 2302 | int r; |
| 2303 | |
| 2304 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); |
| 2305 | if (r < 0) |
| 2306 | goto out; |
| 2307 | data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE; |
| 2308 | r = kvm_write_guest_page(kvm, fn++, &data, |
| 2309 | TSS_IOPB_BASE_OFFSET, sizeof(u16)); |
| 2310 | if (r < 0) |
| 2311 | goto out; |
| 2312 | r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE); |
| 2313 | if (r < 0) |
| 2314 | goto out; |
| 2315 | r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE); |
| 2316 | if (r < 0) |
| 2317 | goto out; |
| 2318 | data = ~0; |
| 2319 | r = kvm_write_guest_page(kvm, fn, &data, |
| 2320 | RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, |
| 2321 | sizeof(u8)); |
| 2322 | if (r < 0) |
| 2323 | goto out; |
| 2324 | |
| 2325 | ret = 1; |
| 2326 | out: |
| 2327 | return ret; |
| 2328 | } |
| 2329 | |
| 2330 | static int init_rmode_identity_map(struct kvm *kvm) |
| 2331 | { |
| 2332 | int i, r, ret; |
| 2333 | pfn_t identity_map_pfn; |
| 2334 | u32 tmp; |
| 2335 | |
| 2336 | if (!enable_ept) |
| 2337 | return 1; |
| 2338 | if (unlikely(!kvm->arch.ept_identity_pagetable)) { |
		printk(KERN_ERR "EPT: identity-mapping pagetable "
			"hasn't been allocated!\n");
| 2341 | return 0; |
| 2342 | } |
| 2343 | if (likely(kvm->arch.ept_identity_pagetable_done)) |
| 2344 | return 1; |
| 2345 | ret = 0; |
| 2346 | identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT; |
| 2347 | r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE); |
| 2348 | if (r < 0) |
| 2349 | goto out; |
| 2350 | /* Set up identity-mapping pagetable for EPT in real mode */ |
| 2351 | for (i = 0; i < PT32_ENT_PER_PAGE; i++) { |
| 2352 | tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | |
| 2353 | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE); |
| 2354 | r = kvm_write_guest_page(kvm, identity_map_pfn, |
| 2355 | &tmp, i * sizeof(tmp), sizeof(tmp)); |
| 2356 | if (r < 0) |
| 2357 | goto out; |
| 2358 | } |
| 2359 | kvm->arch.ept_identity_pagetable_done = true; |
| 2360 | ret = 1; |
| 2361 | out: |
| 2362 | return ret; |
| 2363 | } |
| 2364 | |
| 2365 | static void seg_setup(int seg) |
| 2366 | { |
| 2367 | struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg]; |
| 2368 | unsigned int ar; |
| 2369 | |
| 2370 | vmcs_write16(sf->selector, 0); |
| 2371 | vmcs_writel(sf->base, 0); |
| 2372 | vmcs_write32(sf->limit, 0xffff); |
| 2373 | if (enable_unrestricted_guest) { |
| 2374 | ar = 0x93; |
| 2375 | if (seg == VCPU_SREG_CS) |
| 2376 | ar |= 0x08; /* code segment */ |
| 2377 | } else |
| 2378 | ar = 0xf3; |
| 2379 | |
| 2380 | vmcs_write32(sf->ar_bytes, ar); |
| 2381 | } |
| 2382 | |
| 2383 | static int alloc_apic_access_page(struct kvm *kvm) |
| 2384 | { |
| 2385 | struct kvm_userspace_memory_region kvm_userspace_mem; |
| 2386 | int r = 0; |
| 2387 | |
| 2388 | mutex_lock(&kvm->slots_lock); |
| 2389 | if (kvm->arch.apic_access_page) |
| 2390 | goto out; |
| 2391 | kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT; |
| 2392 | kvm_userspace_mem.flags = 0; |
| 2393 | kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL; |
| 2394 | kvm_userspace_mem.memory_size = PAGE_SIZE; |
| 2395 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); |
| 2396 | if (r) |
| 2397 | goto out; |
| 2398 | |
| 2399 | kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); |
| 2400 | out: |
| 2401 | mutex_unlock(&kvm->slots_lock); |
| 2402 | return r; |
| 2403 | } |
| 2404 | |
| 2405 | static int alloc_identity_pagetable(struct kvm *kvm) |
| 2406 | { |
| 2407 | struct kvm_userspace_memory_region kvm_userspace_mem; |
| 2408 | int r = 0; |
| 2409 | |
| 2410 | mutex_lock(&kvm->slots_lock); |
| 2411 | if (kvm->arch.ept_identity_pagetable) |
| 2412 | goto out; |
| 2413 | kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT; |
| 2414 | kvm_userspace_mem.flags = 0; |
| 2415 | kvm_userspace_mem.guest_phys_addr = |
| 2416 | kvm->arch.ept_identity_map_addr; |
| 2417 | kvm_userspace_mem.memory_size = PAGE_SIZE; |
| 2418 | r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0); |
| 2419 | if (r) |
| 2420 | goto out; |
| 2421 | |
| 2422 | kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, |
| 2423 | kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); |
| 2424 | out: |
| 2425 | mutex_unlock(&kvm->slots_lock); |
| 2426 | return r; |
| 2427 | } |
| 2428 | |
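/*
 * VPID 0 is reserved for the host. A vcpu that cannot claim a VPID of
 * its own keeps vpid == 0 and simply loses the benefit of tagged TLB
 * entries.
 */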
| 2429 | static void allocate_vpid(struct vcpu_vmx *vmx) |
| 2430 | { |
| 2431 | int vpid; |
| 2432 | |
| 2433 | vmx->vpid = 0; |
| 2434 | if (!enable_vpid) |
| 2435 | return; |
| 2436 | spin_lock(&vmx_vpid_lock); |
| 2437 | vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS); |
| 2438 | if (vpid < VMX_NR_VPIDS) { |
| 2439 | vmx->vpid = vpid; |
| 2440 | __set_bit(vpid, vmx_vpid_bitmap); |
| 2441 | } |
| 2442 | spin_unlock(&vmx_vpid_lock); |
| 2443 | } |
| 2444 | |
| 2445 | static void free_vpid(struct vcpu_vmx *vmx) |
| 2446 | { |
| 2447 | if (!enable_vpid) |
| 2448 | return; |
| 2449 | spin_lock(&vmx_vpid_lock); |
| 2450 | if (vmx->vpid != 0) |
| 2451 | __clear_bit(vmx->vpid, vmx_vpid_bitmap); |
| 2452 | spin_unlock(&vmx_vpid_lock); |
| 2453 | } |
| 2454 | |
| 2455 | static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr) |
| 2456 | { |
| 2457 | int f = sizeof(unsigned long); |
| 2458 | |
| 2459 | if (!cpu_has_vmx_msr_bitmap()) |
| 2460 | return; |
| 2461 | |
| 2462 | /* |
| 2463 | * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals |
| 2464 | * have the write-low and read-high bitmap offsets the wrong way round. |
| 2465 | * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. |
| 2466 | */ |
| 2467 | if (msr <= 0x1fff) { |
| 2468 | __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */ |
| 2469 | __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */ |
| 2470 | } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { |
| 2471 | msr &= 0x1fff; |
| 2472 | __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */ |
| 2473 | __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */ |
| 2474 | } |
| 2475 | } |
| 2476 | |
| 2477 | static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only) |
| 2478 | { |
| 2479 | if (!longmode_only) |
| 2480 | __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr); |
| 2481 | __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr); |
| 2482 | } |
| 2483 | |
| 2484 | /* |
| 2485 | * Sets up the vmcs for emulated real mode. |
| 2486 | */ |
| 2487 | static int vmx_vcpu_setup(struct vcpu_vmx *vmx) |
| 2488 | { |
| 2489 | u32 host_sysenter_cs, msr_low, msr_high; |
| 2490 | u32 junk; |
| 2491 | u64 host_pat; |
| 2492 | unsigned long a; |
| 2493 | struct desc_ptr dt; |
| 2494 | int i; |
| 2495 | unsigned long kvm_vmx_return; |
| 2496 | u32 exec_control; |
| 2497 | |
| 2498 | /* I/O */ |
| 2499 | vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a)); |
| 2500 | vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); |
| 2501 | |
| 2502 | if (cpu_has_vmx_msr_bitmap()) |
| 2503 | vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy)); |
| 2504 | |
| 2505 | vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ |
| 2506 | |
| 2507 | /* Control */ |
| 2508 | vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, |
| 2509 | vmcs_config.pin_based_exec_ctrl); |
| 2510 | |
| 2511 | exec_control = vmcs_config.cpu_based_exec_ctrl; |
| 2512 | if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) { |
| 2513 | exec_control &= ~CPU_BASED_TPR_SHADOW; |
| 2514 | #ifdef CONFIG_X86_64 |
| 2515 | exec_control |= CPU_BASED_CR8_STORE_EXITING | |
| 2516 | CPU_BASED_CR8_LOAD_EXITING; |
| 2517 | #endif |
| 2518 | } |
| 2519 | if (!enable_ept) |
| 2520 | exec_control |= CPU_BASED_CR3_STORE_EXITING | |
| 2521 | CPU_BASED_CR3_LOAD_EXITING | |
| 2522 | CPU_BASED_INVLPG_EXITING; |
| 2523 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control); |
| 2524 | |
| 2525 | if (cpu_has_secondary_exec_ctrls()) { |
| 2526 | exec_control = vmcs_config.cpu_based_2nd_exec_ctrl; |
| 2527 | if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) |
| 2528 | exec_control &= |
| 2529 | ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
| 2530 | if (vmx->vpid == 0) |
| 2531 | exec_control &= ~SECONDARY_EXEC_ENABLE_VPID; |
| 2532 | if (!enable_ept) { |
| 2533 | exec_control &= ~SECONDARY_EXEC_ENABLE_EPT; |
| 2534 | enable_unrestricted_guest = 0; |
| 2535 | } |
| 2536 | if (!enable_unrestricted_guest) |
| 2537 | exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST; |
| 2538 | if (!ple_gap) |
| 2539 | exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING; |
| 2540 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); |
| 2541 | } |
| 2542 | |
| 2543 | if (ple_gap) { |
| 2544 | vmcs_write32(PLE_GAP, ple_gap); |
| 2545 | vmcs_write32(PLE_WINDOW, ple_window); |
| 2546 | } |
| 2547 | |
| 2548 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf); |
| 2549 | vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf); |
| 2550 | vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */ |
| 2551 | |
| 2552 | vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */ |
| 2553 | vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ |
| 2554 | vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ |
| 2555 | |
| 2556 | vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ |
| 2557 | vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2558 | vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2559 | vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */ |
| 2560 | vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */ |
| 2561 | vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ |
| 2562 | #ifdef CONFIG_X86_64 |
| 2563 | rdmsrl(MSR_FS_BASE, a); |
| 2564 | vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */ |
| 2565 | rdmsrl(MSR_GS_BASE, a); |
| 2566 | vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */ |
| 2567 | #else |
| 2568 | vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */ |
| 2569 | vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */ |
| 2570 | #endif |
| 2571 | |
| 2572 | vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ |
| 2573 | |
| 2574 | native_store_idt(&dt); |
| 2575 | vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ |
| 2576 | |
| 2577 | asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); |
| 2578 | vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ |
| 2579 | vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); |
| 2580 | vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); |
| 2581 | vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); |
| 2582 | vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); |
| 2583 | vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest)); |
| 2584 | |
| 2585 | rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk); |
| 2586 | vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs); |
| 2587 | rdmsrl(MSR_IA32_SYSENTER_ESP, a); |
| 2588 | vmcs_writel(HOST_IA32_SYSENTER_ESP, a); /* 22.2.3 */ |
| 2589 | rdmsrl(MSR_IA32_SYSENTER_EIP, a); |
| 2590 | vmcs_writel(HOST_IA32_SYSENTER_EIP, a); /* 22.2.3 */ |
| 2591 | |
| 2592 | if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) { |
| 2593 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); |
| 2594 | host_pat = msr_low | ((u64) msr_high << 32); |
| 2595 | vmcs_write64(HOST_IA32_PAT, host_pat); |
| 2596 | } |
| 2597 | if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) { |
| 2598 | rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high); |
| 2599 | host_pat = msr_low | ((u64) msr_high << 32); |
		/* Write the default value to follow the host PAT */
		vmcs_write64(GUEST_IA32_PAT, host_pat);
		/* Keep arch.pat in sync with GUEST_IA32_PAT */
| 2603 | vmx->vcpu.arch.pat = host_pat; |
| 2604 | } |
| 2605 | |
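	/*
	 * Probe the candidate guest MSRs: only MSRs the host can actually
	 * read and write back are tracked. Note that .index stores the
	 * position in vmx_msr_index[], not the MSR number itself.
	 */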
| 2606 | for (i = 0; i < NR_VMX_MSR; ++i) { |
| 2607 | u32 index = vmx_msr_index[i]; |
| 2608 | u32 data_low, data_high; |
| 2609 | int j = vmx->nmsrs; |
| 2610 | |
| 2611 | if (rdmsr_safe(index, &data_low, &data_high) < 0) |
| 2612 | continue; |
| 2613 | if (wrmsr_safe(index, data_low, data_high) < 0) |
| 2614 | continue; |
| 2615 | vmx->guest_msrs[j].index = i; |
| 2616 | vmx->guest_msrs[j].data = 0; |
| 2617 | vmx->guest_msrs[j].mask = -1ull; |
| 2618 | ++vmx->nmsrs; |
| 2619 | } |
| 2620 | |
| 2621 | vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl); |
| 2622 | |
| 2623 | /* 22.2.1, 20.8.1 */ |
| 2624 | vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl); |
| 2625 | |
| 2626 | vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL); |
| 2627 | vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; |
| 2628 | if (enable_ept) |
| 2629 | vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; |
| 2630 | vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); |
| 2631 | |
| 2632 | kvm_write_tsc(&vmx->vcpu, 0); |
| 2633 | |
| 2634 | return 0; |
| 2635 | } |
| 2636 | |
| 2637 | static int init_rmode(struct kvm *kvm) |
| 2638 | { |
| 2639 | int idx, ret = 0; |
| 2640 | |
| 2641 | idx = srcu_read_lock(&kvm->srcu); |
| 2642 | if (!init_rmode_tss(kvm)) |
| 2643 | goto exit; |
| 2644 | if (!init_rmode_identity_map(kvm)) |
| 2645 | goto exit; |
| 2646 | |
| 2647 | ret = 1; |
| 2648 | exit: |
| 2649 | srcu_read_unlock(&kvm->srcu, idx); |
| 2650 | return ret; |
| 2651 | } |
| 2652 | |
| 2653 | static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) |
| 2654 | { |
| 2655 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2656 | u64 msr; |
| 2657 | int ret; |
| 2658 | |
| 2659 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); |
| 2660 | if (!init_rmode(vmx->vcpu.kvm)) { |
| 2661 | ret = -ENOMEM; |
| 2662 | goto out; |
| 2663 | } |
| 2664 | |
| 2665 | vmx->rmode.vm86_active = 0; |
| 2666 | |
| 2667 | vmx->soft_vnmi_blocked = 0; |
| 2668 | |
| 2669 | vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); |
| 2670 | kvm_set_cr8(&vmx->vcpu, 0); |
| 2671 | msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; |
| 2672 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) |
| 2673 | msr |= MSR_IA32_APICBASE_BSP; |
| 2674 | kvm_set_apic_base(&vmx->vcpu, msr); |
| 2675 | |
| 2676 | ret = fx_init(&vmx->vcpu); |
| 2677 | if (ret != 0) |
| 2678 | goto out; |
| 2679 | |
| 2680 | seg_setup(VCPU_SREG_CS); |
| 2681 | /* |
| 2682 | * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode |
| 2683 | * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh. |
| 2684 | */ |
| 2685 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) { |
| 2686 | vmcs_write16(GUEST_CS_SELECTOR, 0xf000); |
| 2687 | vmcs_writel(GUEST_CS_BASE, 0x000f0000); |
| 2688 | } else { |
| 2689 | vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8); |
| 2690 | vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12); |
| 2691 | } |
| 2692 | |
| 2693 | seg_setup(VCPU_SREG_DS); |
| 2694 | seg_setup(VCPU_SREG_ES); |
| 2695 | seg_setup(VCPU_SREG_FS); |
| 2696 | seg_setup(VCPU_SREG_GS); |
| 2697 | seg_setup(VCPU_SREG_SS); |
| 2698 | |
| 2699 | vmcs_write16(GUEST_TR_SELECTOR, 0); |
| 2700 | vmcs_writel(GUEST_TR_BASE, 0); |
| 2701 | vmcs_write32(GUEST_TR_LIMIT, 0xffff); |
| 2702 | vmcs_write32(GUEST_TR_AR_BYTES, 0x008b); |
| 2703 | |
| 2704 | vmcs_write16(GUEST_LDTR_SELECTOR, 0); |
| 2705 | vmcs_writel(GUEST_LDTR_BASE, 0); |
| 2706 | vmcs_write32(GUEST_LDTR_LIMIT, 0xffff); |
| 2707 | vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082); |
| 2708 | |
| 2709 | vmcs_write32(GUEST_SYSENTER_CS, 0); |
| 2710 | vmcs_writel(GUEST_SYSENTER_ESP, 0); |
| 2711 | vmcs_writel(GUEST_SYSENTER_EIP, 0); |
| 2712 | |
| 2713 | vmcs_writel(GUEST_RFLAGS, 0x02); |
| 2714 | if (kvm_vcpu_is_bsp(&vmx->vcpu)) |
| 2715 | kvm_rip_write(vcpu, 0xfff0); |
| 2716 | else |
| 2717 | kvm_rip_write(vcpu, 0); |
| 2718 | kvm_register_write(vcpu, VCPU_REGS_RSP, 0); |
| 2719 | |
| 2720 | vmcs_writel(GUEST_DR7, 0x400); |
| 2721 | |
| 2722 | vmcs_writel(GUEST_GDTR_BASE, 0); |
| 2723 | vmcs_write32(GUEST_GDTR_LIMIT, 0xffff); |
| 2724 | |
| 2725 | vmcs_writel(GUEST_IDTR_BASE, 0); |
| 2726 | vmcs_write32(GUEST_IDTR_LIMIT, 0xffff); |
| 2727 | |
| 2728 | vmcs_write32(GUEST_ACTIVITY_STATE, 0); |
| 2729 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0); |
| 2730 | vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0); |
| 2731 | |
| 2732 | /* Special registers */ |
| 2733 | vmcs_write64(GUEST_IA32_DEBUGCTL, 0); |
| 2734 | |
| 2735 | setup_msrs(vmx); |
| 2736 | |
| 2737 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */ |
| 2738 | |
| 2739 | if (cpu_has_vmx_tpr_shadow()) { |
| 2740 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0); |
| 2741 | if (vm_need_tpr_shadow(vmx->vcpu.kvm)) |
| 2742 | vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, |
| 2743 | page_to_phys(vmx->vcpu.arch.apic->regs_page)); |
| 2744 | vmcs_write32(TPR_THRESHOLD, 0); |
| 2745 | } |
| 2746 | |
| 2747 | if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) |
| 2748 | vmcs_write64(APIC_ACCESS_ADDR, |
| 2749 | page_to_phys(vmx->vcpu.kvm->arch.apic_access_page)); |
| 2750 | |
| 2751 | if (vmx->vpid != 0) |
| 2752 | vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); |
| 2753 | |
| 2754 | vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET; |
| 2755 | vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */ |
| 2756 | vmx_set_cr4(&vmx->vcpu, 0); |
| 2757 | vmx_set_efer(&vmx->vcpu, 0); |
| 2758 | vmx_fpu_activate(&vmx->vcpu); |
| 2759 | update_exception_bitmap(&vmx->vcpu); |
| 2760 | |
| 2761 | vpid_sync_context(vmx); |
| 2762 | |
| 2763 | ret = 0; |
| 2764 | |
| 2765 | /* HACK: Don't enable emulation on guest boot/reset */ |
| 2766 | vmx->emulation_required = 0; |
| 2767 | |
| 2768 | out: |
| 2769 | return ret; |
| 2770 | } |
| 2771 | |
| 2772 | static void enable_irq_window(struct kvm_vcpu *vcpu) |
| 2773 | { |
| 2774 | u32 cpu_based_vm_exec_control; |
| 2775 | |
| 2776 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
| 2777 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING; |
| 2778 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
| 2779 | } |
| 2780 | |
| 2781 | static void enable_nmi_window(struct kvm_vcpu *vcpu) |
| 2782 | { |
| 2783 | u32 cpu_based_vm_exec_control; |
| 2784 | |
| 2785 | if (!cpu_has_virtual_nmis()) { |
| 2786 | enable_irq_window(vcpu); |
| 2787 | return; |
| 2788 | } |
| 2789 | |
| 2790 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
| 2791 | cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING; |
| 2792 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
| 2793 | } |
| 2794 | |
| 2795 | static void vmx_inject_irq(struct kvm_vcpu *vcpu) |
| 2796 | { |
| 2797 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2798 | uint32_t intr; |
| 2799 | int irq = vcpu->arch.interrupt.nr; |
| 2800 | |
| 2801 | trace_kvm_inj_virq(irq); |
| 2802 | |
| 2803 | ++vcpu->stat.irq_injections; |
| 2804 | if (vmx->rmode.vm86_active) { |
| 2805 | if (kvm_inject_realmode_interrupt(vcpu, irq) != EMULATE_DONE) |
| 2806 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 2807 | return; |
| 2808 | } |
| 2809 | intr = irq | INTR_INFO_VALID_MASK; |
| 2810 | if (vcpu->arch.interrupt.soft) { |
| 2811 | intr |= INTR_TYPE_SOFT_INTR; |
| 2812 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
| 2813 | vmx->vcpu.arch.event_exit_inst_len); |
| 2814 | } else |
| 2815 | intr |= INTR_TYPE_EXT_INTR; |
| 2816 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr); |
| 2817 | } |
| 2818 | |
| 2819 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) |
| 2820 | { |
| 2821 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2822 | |
| 2823 | if (!cpu_has_virtual_nmis()) { |
| 2824 | /* |
| 2825 | * Tracking the NMI-blocked state in software is built upon |
| 2826 | * finding the next open IRQ window. This, in turn, depends on |
| 2827 | * well-behaving guests: They have to keep IRQs disabled at |
| 2828 | * least as long as the NMI handler runs. Otherwise we may |
| 2829 | * cause NMI nesting, maybe breaking the guest. But as this is |
| 2830 | * highly unlikely, we can live with the residual risk. |
| 2831 | */ |
| 2832 | vmx->soft_vnmi_blocked = 1; |
| 2833 | vmx->vnmi_blocked_time = 0; |
| 2834 | } |
| 2835 | |
| 2836 | ++vcpu->stat.nmi_injections; |
| 2837 | if (vmx->rmode.vm86_active) { |
| 2838 | if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR) != EMULATE_DONE) |
| 2839 | kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); |
| 2840 | return; |
| 2841 | } |
| 2842 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
| 2843 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); |
| 2844 | } |
| 2845 | |
| 2846 | static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) |
| 2847 | { |
| 2848 | if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked) |
| 2849 | return 0; |
| 2850 | |
| 2851 | return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
| 2852 | (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI)); |
| 2853 | } |
| 2854 | |
| 2855 | static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu) |
| 2856 | { |
| 2857 | if (!cpu_has_virtual_nmis()) |
| 2858 | return to_vmx(vcpu)->soft_vnmi_blocked; |
| 2859 | return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI; |
| 2860 | } |
| 2861 | |
| 2862 | static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked) |
| 2863 | { |
| 2864 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2865 | |
| 2866 | if (!cpu_has_virtual_nmis()) { |
| 2867 | if (vmx->soft_vnmi_blocked != masked) { |
| 2868 | vmx->soft_vnmi_blocked = masked; |
| 2869 | vmx->vnmi_blocked_time = 0; |
| 2870 | } |
| 2871 | } else { |
| 2872 | if (masked) |
| 2873 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
| 2874 | GUEST_INTR_STATE_NMI); |
| 2875 | else |
| 2876 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, |
| 2877 | GUEST_INTR_STATE_NMI); |
| 2878 | } |
| 2879 | } |
| 2880 | |
| 2881 | static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) |
| 2882 | { |
| 2883 | return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) && |
| 2884 | !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & |
| 2885 | (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)); |
| 2886 | } |
| 2887 | |
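/*
 * Userspace tells us where to place the three pages of the vm86 TSS in
 * guest physical memory (see init_rmode_tss() for the layout).
 */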
| 2888 | static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr) |
| 2889 | { |
| 2890 | int ret; |
| 2891 | struct kvm_userspace_memory_region tss_mem = { |
| 2892 | .slot = TSS_PRIVATE_MEMSLOT, |
| 2893 | .guest_phys_addr = addr, |
| 2894 | .memory_size = PAGE_SIZE * 3, |
| 2895 | .flags = 0, |
| 2896 | }; |
| 2897 | |
| 2898 | ret = kvm_set_memory_region(kvm, &tss_mem, 0); |
| 2899 | if (ret) |
| 2900 | return ret; |
| 2901 | kvm->arch.tss_addr = addr; |
| 2902 | return 0; |
| 2903 | } |
| 2904 | |
| 2905 | static int handle_rmode_exception(struct kvm_vcpu *vcpu, |
| 2906 | int vec, u32 err_code) |
| 2907 | { |
| 2908 | /* |
| 2909 | * Instruction with address size override prefix opcode 0x67 |
| 2910 | * Cause the #SS fault with 0 error code in VM86 mode. |
| 2911 | */ |
| 2912 | if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) |
| 2913 | if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE) |
| 2914 | return 1; |
| 2915 | /* |
| 2916 | * Forward all other exceptions that are valid in real mode. |
| 2917 | * FIXME: Breaks guest debugging in real mode, needs to be fixed with |
| 2918 | * the required debugging infrastructure rework. |
| 2919 | */ |
| 2920 | switch (vec) { |
| 2921 | case DB_VECTOR: |
| 2922 | if (vcpu->guest_debug & |
| 2923 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) |
| 2924 | return 0; |
| 2925 | kvm_queue_exception(vcpu, vec); |
| 2926 | return 1; |
| 2927 | case BP_VECTOR: |
| 2928 | /* |
| 2929 | * Update instruction length as we may reinject the exception |
| 2930 | * from user space while in guest debugging mode. |
| 2931 | */ |
| 2932 | to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = |
| 2933 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
| 2934 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) |
| 2935 | return 0; |
| 2936 | /* fall through */ |
| 2937 | case DE_VECTOR: |
| 2938 | case OF_VECTOR: |
| 2939 | case BR_VECTOR: |
| 2940 | case UD_VECTOR: |
| 2941 | case DF_VECTOR: |
| 2942 | case SS_VECTOR: |
| 2943 | case GP_VECTOR: |
| 2944 | case MF_VECTOR: |
| 2945 | kvm_queue_exception(vcpu, vec); |
| 2946 | return 1; |
| 2947 | } |
| 2948 | return 0; |
| 2949 | } |
| 2950 | |
| 2951 | /* |
| 2952 | * Trigger machine check on the host. We assume all the MSRs are already set up |
| 2953 | * by the CPU and that we still run on the same CPU as the MCE occurred on. |
| 2954 | * We pass a fake environment to the machine check handler because we want |
| 2955 | * the guest to be always treated like user space, no matter what context |
| 2956 | * it used internally. |
| 2957 | */ |
| 2958 | static void kvm_machine_check(void) |
| 2959 | { |
| 2960 | #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64) |
| 2961 | struct pt_regs regs = { |
| 2962 | .cs = 3, /* Fake ring 3 no matter what the guest ran on */ |
| 2963 | .flags = X86_EFLAGS_IF, |
| 2964 | }; |
| 2965 | |
| 2966 | do_machine_check(®s, 0); |
| 2967 | #endif |
| 2968 | } |
| 2969 | |
| 2970 | static int handle_machine_check(struct kvm_vcpu *vcpu) |
| 2971 | { |
| 2972 | /* already handled by vcpu_run */ |
| 2973 | return 1; |
| 2974 | } |
| 2975 | |
| 2976 | static int handle_exception(struct kvm_vcpu *vcpu) |
| 2977 | { |
| 2978 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 2979 | struct kvm_run *kvm_run = vcpu->run; |
| 2980 | u32 intr_info, ex_no, error_code; |
| 2981 | unsigned long cr2, rip, dr6; |
| 2982 | u32 vect_info; |
| 2983 | enum emulation_result er; |
| 2984 | |
| 2985 | vect_info = vmx->idt_vectoring_info; |
| 2986 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
| 2987 | |
| 2988 | if (is_machine_check(intr_info)) |
| 2989 | return handle_machine_check(vcpu); |
| 2990 | |
| 2991 | if ((vect_info & VECTORING_INFO_VALID_MASK) && |
| 2992 | !is_page_fault(intr_info)) { |
| 2993 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 2994 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; |
| 2995 | vcpu->run->internal.ndata = 2; |
| 2996 | vcpu->run->internal.data[0] = vect_info; |
| 2997 | vcpu->run->internal.data[1] = intr_info; |
| 2998 | return 0; |
| 2999 | } |
| 3000 | |
| 3001 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) |
| 3002 | return 1; /* already handled by vmx_vcpu_run() */ |
| 3003 | |
| 3004 | if (is_no_device(intr_info)) { |
| 3005 | vmx_fpu_activate(vcpu); |
| 3006 | return 1; |
| 3007 | } |
| 3008 | |
| 3009 | if (is_invalid_opcode(intr_info)) { |
| 3010 | er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD); |
| 3011 | if (er != EMULATE_DONE) |
| 3012 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 3013 | return 1; |
| 3014 | } |
| 3015 | |
| 3016 | error_code = 0; |
| 3017 | rip = kvm_rip_read(vcpu); |
| 3018 | if (intr_info & INTR_INFO_DELIVER_CODE_MASK) |
| 3019 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); |
| 3020 | if (is_page_fault(intr_info)) { |
| 3021 | /* EPT won't cause page fault directly */ |
| 3022 | if (enable_ept) |
| 3023 | BUG(); |
| 3024 | cr2 = vmcs_readl(EXIT_QUALIFICATION); |
| 3025 | trace_kvm_page_fault(cr2, error_code); |
| 3026 | |
| 3027 | if (kvm_event_needs_reinjection(vcpu)) |
| 3028 | kvm_mmu_unprotect_page_virt(vcpu, cr2); |
| 3029 | return kvm_mmu_page_fault(vcpu, cr2, error_code); |
| 3030 | } |
| 3031 | |
| 3032 | if (vmx->rmode.vm86_active && |
| 3033 | handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK, |
| 3034 | error_code)) { |
| 3035 | if (vcpu->arch.halt_request) { |
| 3036 | vcpu->arch.halt_request = 0; |
| 3037 | return kvm_emulate_halt(vcpu); |
| 3038 | } |
| 3039 | return 1; |
| 3040 | } |
| 3041 | |
| 3042 | ex_no = intr_info & INTR_INFO_VECTOR_MASK; |
| 3043 | switch (ex_no) { |
| 3044 | case DB_VECTOR: |
| 3045 | dr6 = vmcs_readl(EXIT_QUALIFICATION); |
| 3046 | if (!(vcpu->guest_debug & |
| 3047 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { |
| 3048 | vcpu->arch.dr6 = dr6 | DR6_FIXED_1; |
| 3049 | kvm_queue_exception(vcpu, DB_VECTOR); |
| 3050 | return 1; |
| 3051 | } |
| 3052 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; |
| 3053 | kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); |
| 3054 | /* fall through */ |
| 3055 | case BP_VECTOR: |
| 3056 | /* |
| 3057 | * Update instruction length as we may reinject #BP from |
| 3058 | * user space while in guest debugging mode. Reading it for |
| 3059 | * #DB as well causes no harm, it is not used in that case. |
| 3060 | */ |
| 3061 | vmx->vcpu.arch.event_exit_inst_len = |
| 3062 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN); |
| 3063 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
| 3064 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; |
| 3065 | kvm_run->debug.arch.exception = ex_no; |
| 3066 | break; |
| 3067 | default: |
| 3068 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; |
| 3069 | kvm_run->ex.exception = ex_no; |
| 3070 | kvm_run->ex.error_code = error_code; |
| 3071 | break; |
| 3072 | } |
| 3073 | return 0; |
| 3074 | } |
| 3075 | |
| 3076 | static int handle_external_interrupt(struct kvm_vcpu *vcpu) |
| 3077 | { |
| 3078 | ++vcpu->stat.irq_exits; |
| 3079 | return 1; |
| 3080 | } |
| 3081 | |
| 3082 | static int handle_triple_fault(struct kvm_vcpu *vcpu) |
| 3083 | { |
| 3084 | vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; |
| 3085 | return 0; |
| 3086 | } |
| 3087 | |
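/*
 * Only a non-string OUT takes the fast path below; string operations and
 * IN still go through the full instruction emulator.
 */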
| 3088 | static int handle_io(struct kvm_vcpu *vcpu) |
| 3089 | { |
| 3090 | unsigned long exit_qualification; |
| 3091 | int size, in, string; |
| 3092 | unsigned port; |
| 3093 | |
| 3094 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3095 | string = (exit_qualification & 16) != 0; |
| 3096 | in = (exit_qualification & 8) != 0; |
| 3097 | |
| 3098 | ++vcpu->stat.io_exits; |
| 3099 | |
| 3100 | if (string || in) |
| 3101 | return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; |
| 3102 | |
| 3103 | port = exit_qualification >> 16; |
| 3104 | size = (exit_qualification & 7) + 1; |
| 3105 | skip_emulated_instruction(vcpu); |
| 3106 | |
| 3107 | return kvm_fast_pio_out(vcpu, size, port); |
| 3108 | } |
| 3109 | |
| 3110 | static void |
| 3111 | vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall) |
| 3112 | { |
| 3113 | /* |
| 3114 | * Patch in the VMCALL instruction: |
| 3115 | */ |
| 3116 | hypercall[0] = 0x0f; |
| 3117 | hypercall[1] = 0x01; |
| 3118 | hypercall[2] = 0xc1; |
| 3119 | } |
| 3120 | |
| 3121 | static void complete_insn_gp(struct kvm_vcpu *vcpu, int err) |
| 3122 | { |
| 3123 | if (err) |
| 3124 | kvm_inject_gp(vcpu, 0); |
| 3125 | else |
| 3126 | skip_emulated_instruction(vcpu); |
| 3127 | } |
| 3128 | |
| 3129 | static int handle_cr(struct kvm_vcpu *vcpu) |
| 3130 | { |
| 3131 | unsigned long exit_qualification, val; |
| 3132 | int cr; |
| 3133 | int reg; |
| 3134 | int err; |
| 3135 | |
| 3136 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3137 | cr = exit_qualification & 15; |
| 3138 | reg = (exit_qualification >> 8) & 15; |
| 3139 | switch ((exit_qualification >> 4) & 3) { |
| 3140 | case 0: /* mov to cr */ |
| 3141 | val = kvm_register_read(vcpu, reg); |
| 3142 | trace_kvm_cr_write(cr, val); |
| 3143 | switch (cr) { |
| 3144 | case 0: |
| 3145 | err = kvm_set_cr0(vcpu, val); |
| 3146 | complete_insn_gp(vcpu, err); |
| 3147 | return 1; |
| 3148 | case 3: |
| 3149 | err = kvm_set_cr3(vcpu, val); |
| 3150 | complete_insn_gp(vcpu, err); |
| 3151 | return 1; |
| 3152 | case 4: |
| 3153 | err = kvm_set_cr4(vcpu, val); |
| 3154 | complete_insn_gp(vcpu, err); |
| 3155 | return 1; |
| 3156 | case 8: { |
| 3157 | u8 cr8_prev = kvm_get_cr8(vcpu); |
| 3158 | u8 cr8 = kvm_register_read(vcpu, reg); |
| 3159 | kvm_set_cr8(vcpu, cr8); |
| 3160 | skip_emulated_instruction(vcpu); |
| 3161 | if (irqchip_in_kernel(vcpu->kvm)) |
| 3162 | return 1; |
| 3163 | if (cr8_prev <= cr8) |
| 3164 | return 1; |
| 3165 | vcpu->run->exit_reason = KVM_EXIT_SET_TPR; |
| 3166 | return 0; |
| 3167 | } |
| 3168 | }; |
| 3169 | break; |
| 3170 | case 2: /* clts */ |
| 3171 | vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS)); |
| 3172 | trace_kvm_cr_write(0, kvm_read_cr0(vcpu)); |
| 3173 | skip_emulated_instruction(vcpu); |
| 3174 | vmx_fpu_activate(vcpu); |
| 3175 | return 1; |
	case 1: /* mov from cr */
| 3177 | switch (cr) { |
| 3178 | case 3: |
| 3179 | kvm_register_write(vcpu, reg, vcpu->arch.cr3); |
| 3180 | trace_kvm_cr_read(cr, vcpu->arch.cr3); |
| 3181 | skip_emulated_instruction(vcpu); |
| 3182 | return 1; |
| 3183 | case 8: |
| 3184 | val = kvm_get_cr8(vcpu); |
| 3185 | kvm_register_write(vcpu, reg, val); |
| 3186 | trace_kvm_cr_read(cr, val); |
| 3187 | skip_emulated_instruction(vcpu); |
| 3188 | return 1; |
| 3189 | } |
| 3190 | break; |
| 3191 | case 3: /* lmsw */ |
| 3192 | val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; |
| 3193 | trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val); |
| 3194 | kvm_lmsw(vcpu, val); |
| 3195 | |
| 3196 | skip_emulated_instruction(vcpu); |
| 3197 | return 1; |
| 3198 | default: |
| 3199 | break; |
| 3200 | } |
| 3201 | vcpu->run->exit_reason = 0; |
| 3202 | pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n", |
| 3203 | (int)(exit_qualification >> 4) & 3, cr); |
| 3204 | return 0; |
| 3205 | } |
| 3206 | |
| 3207 | static int handle_dr(struct kvm_vcpu *vcpu) |
| 3208 | { |
| 3209 | unsigned long exit_qualification; |
| 3210 | int dr, reg; |
| 3211 | |
| 3212 | /* Do not handle if the CPL > 0, will trigger GP on re-entry */ |
| 3213 | if (!kvm_require_cpl(vcpu, 0)) |
| 3214 | return 1; |
| 3215 | dr = vmcs_readl(GUEST_DR7); |
| 3216 | if (dr & DR7_GD) { |
| 3217 | /* |
| 3218 | * As the vm-exit takes precedence over the debug trap, we |
| 3219 | * need to emulate the latter, either for the host or the |
| 3220 | * guest debugging itself. |
| 3221 | */ |
| 3222 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { |
| 3223 | vcpu->run->debug.arch.dr6 = vcpu->arch.dr6; |
| 3224 | vcpu->run->debug.arch.dr7 = dr; |
| 3225 | vcpu->run->debug.arch.pc = |
| 3226 | vmcs_readl(GUEST_CS_BASE) + |
| 3227 | vmcs_readl(GUEST_RIP); |
| 3228 | vcpu->run->debug.arch.exception = DB_VECTOR; |
| 3229 | vcpu->run->exit_reason = KVM_EXIT_DEBUG; |
| 3230 | return 0; |
| 3231 | } else { |
| 3232 | vcpu->arch.dr7 &= ~DR7_GD; |
| 3233 | vcpu->arch.dr6 |= DR6_BD; |
| 3234 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); |
| 3235 | kvm_queue_exception(vcpu, DB_VECTOR); |
| 3236 | return 1; |
| 3237 | } |
| 3238 | } |
| 3239 | |
| 3240 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3241 | dr = exit_qualification & DEBUG_REG_ACCESS_NUM; |
| 3242 | reg = DEBUG_REG_ACCESS_REG(exit_qualification); |
| 3243 | if (exit_qualification & TYPE_MOV_FROM_DR) { |
| 3244 | unsigned long val; |
| 3245 | if (!kvm_get_dr(vcpu, dr, &val)) |
| 3246 | kvm_register_write(vcpu, reg, val); |
| 3247 | } else |
| 3248 | kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]); |
| 3249 | skip_emulated_instruction(vcpu); |
| 3250 | return 1; |
| 3251 | } |
| 3252 | |
| 3253 | static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) |
| 3254 | { |
| 3255 | vmcs_writel(GUEST_DR7, val); |
| 3256 | } |
| 3257 | |
| 3258 | static int handle_cpuid(struct kvm_vcpu *vcpu) |
| 3259 | { |
| 3260 | kvm_emulate_cpuid(vcpu); |
| 3261 | return 1; |
| 3262 | } |
| 3263 | |
| 3264 | static int handle_rdmsr(struct kvm_vcpu *vcpu) |
| 3265 | { |
| 3266 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
| 3267 | u64 data; |
| 3268 | |
| 3269 | if (vmx_get_msr(vcpu, ecx, &data)) { |
| 3270 | trace_kvm_msr_read_ex(ecx); |
| 3271 | kvm_inject_gp(vcpu, 0); |
| 3272 | return 1; |
| 3273 | } |
| 3274 | |
| 3275 | trace_kvm_msr_read(ecx, data); |
| 3276 | |
| 3277 | /* FIXME: handling of bits 32:63 of rax, rdx */ |
| 3278 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; |
| 3279 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; |
| 3280 | skip_emulated_instruction(vcpu); |
| 3281 | return 1; |
| 3282 | } |
| 3283 | |
| 3284 | static int handle_wrmsr(struct kvm_vcpu *vcpu) |
| 3285 | { |
| 3286 | u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX]; |
| 3287 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) |
| 3288 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); |
| 3289 | |
| 3290 | if (vmx_set_msr(vcpu, ecx, data) != 0) { |
| 3291 | trace_kvm_msr_write_ex(ecx, data); |
| 3292 | kvm_inject_gp(vcpu, 0); |
| 3293 | return 1; |
| 3294 | } |
| 3295 | |
| 3296 | trace_kvm_msr_write(ecx, data); |
| 3297 | skip_emulated_instruction(vcpu); |
| 3298 | return 1; |
| 3299 | } |
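/*
* Illustrative round trip (example values, not part of the driver):
* an MSR value of 0x1122334455667788 comes back from RDMSR as
* eax = 0x55667788, edx = 0x11223344, and WRMSR reassembles it as
* ((u64)edx << 32) | eax, which is exactly the split and merge
* performed by handle_rdmsr() and handle_wrmsr() above.
*/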
| 3300 | |
| 3301 | static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu) |
| 3302 | { |
| 3303 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3304 | return 1; |
| 3305 | } |
| 3306 | |
| 3307 | static int handle_interrupt_window(struct kvm_vcpu *vcpu) |
| 3308 | { |
| 3309 | u32 cpu_based_vm_exec_control; |
| 3310 | |
| 3311 | /* clear pending irq */ |
| 3312 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
| 3313 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; |
| 3314 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
| 3315 | |
| 3316 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3317 | |
| 3318 | ++vcpu->stat.irq_window_exits; |
| 3319 | |
/*
* If userspace is waiting to inject interrupts, exit as soon as
* possible.
*/
| 3324 | if (!irqchip_in_kernel(vcpu->kvm) && |
| 3325 | vcpu->run->request_interrupt_window && |
| 3326 | !kvm_cpu_has_interrupt(vcpu)) { |
| 3327 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; |
| 3328 | return 0; |
| 3329 | } |
| 3330 | return 1; |
| 3331 | } |
| 3332 | |
| 3333 | static int handle_halt(struct kvm_vcpu *vcpu) |
| 3334 | { |
| 3335 | skip_emulated_instruction(vcpu); |
| 3336 | return kvm_emulate_halt(vcpu); |
| 3337 | } |
| 3338 | |
| 3339 | static int handle_vmcall(struct kvm_vcpu *vcpu) |
| 3340 | { |
| 3341 | skip_emulated_instruction(vcpu); |
| 3342 | kvm_emulate_hypercall(vcpu); |
| 3343 | return 1; |
| 3344 | } |
| 3345 | |
| 3346 | static int handle_vmx_insn(struct kvm_vcpu *vcpu) |
| 3347 | { |
| 3348 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 3349 | return 1; |
| 3350 | } |
| 3351 | |
| 3352 | static int handle_invd(struct kvm_vcpu *vcpu) |
| 3353 | { |
| 3354 | return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; |
| 3355 | } |
| 3356 | |
| 3357 | static int handle_invlpg(struct kvm_vcpu *vcpu) |
| 3358 | { |
| 3359 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3360 | |
| 3361 | kvm_mmu_invlpg(vcpu, exit_qualification); |
| 3362 | skip_emulated_instruction(vcpu); |
| 3363 | return 1; |
| 3364 | } |
| 3365 | |
| 3366 | static int handle_wbinvd(struct kvm_vcpu *vcpu) |
| 3367 | { |
| 3368 | skip_emulated_instruction(vcpu); |
| 3369 | kvm_emulate_wbinvd(vcpu); |
| 3370 | return 1; |
| 3371 | } |
| 3372 | |
| 3373 | static int handle_xsetbv(struct kvm_vcpu *vcpu) |
| 3374 | { |
| 3375 | u64 new_bv = kvm_read_edx_eax(vcpu); |
| 3376 | u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX); |
| 3377 | |
| 3378 | if (kvm_set_xcr(vcpu, index, new_bv) == 0) |
| 3379 | skip_emulated_instruction(vcpu); |
| 3380 | return 1; |
| 3381 | } |
| 3382 | |
| 3383 | static int handle_apic_access(struct kvm_vcpu *vcpu) |
| 3384 | { |
| 3385 | return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE; |
| 3386 | } |
| 3387 | |
| 3388 | static int handle_task_switch(struct kvm_vcpu *vcpu) |
| 3389 | { |
| 3390 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 3391 | unsigned long exit_qualification; |
| 3392 | bool has_error_code = false; |
| 3393 | u32 error_code = 0; |
| 3394 | u16 tss_selector; |
| 3395 | int reason, type, idt_v; |
| 3396 | |
| 3397 | idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); |
| 3398 | type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); |
| 3399 | |
| 3400 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3401 | |
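/*
* For task-switch exits, bits 15:0 of the exit qualification hold the
* new TSS selector and bits 31:30 encode the source of the switch
* (CALL, IRET, JMP or a task gate in the IDT).
*/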
| 3402 | reason = (u32)exit_qualification >> 30; |
| 3403 | if (reason == TASK_SWITCH_GATE && idt_v) { |
| 3404 | switch (type) { |
| 3405 | case INTR_TYPE_NMI_INTR: |
| 3406 | vcpu->arch.nmi_injected = false; |
| 3407 | if (cpu_has_virtual_nmis()) |
| 3408 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
| 3409 | GUEST_INTR_STATE_NMI); |
| 3410 | break; |
| 3411 | case INTR_TYPE_EXT_INTR: |
| 3412 | case INTR_TYPE_SOFT_INTR: |
| 3413 | kvm_clear_interrupt_queue(vcpu); |
| 3414 | break; |
| 3415 | case INTR_TYPE_HARD_EXCEPTION: |
| 3416 | if (vmx->idt_vectoring_info & |
| 3417 | VECTORING_INFO_DELIVER_CODE_MASK) { |
| 3418 | has_error_code = true; |
| 3419 | error_code = |
| 3420 | vmcs_read32(IDT_VECTORING_ERROR_CODE); |
| 3421 | } |
| 3422 | /* fall through */ |
| 3423 | case INTR_TYPE_SOFT_EXCEPTION: |
| 3424 | kvm_clear_exception_queue(vcpu); |
| 3425 | break; |
| 3426 | default: |
| 3427 | break; |
| 3428 | } |
| 3429 | } |
| 3430 | tss_selector = exit_qualification; |
| 3431 | |
| 3432 | if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION && |
| 3433 | type != INTR_TYPE_EXT_INTR && |
| 3434 | type != INTR_TYPE_NMI_INTR)) |
| 3435 | skip_emulated_instruction(vcpu); |
| 3436 | |
| 3437 | if (kvm_task_switch(vcpu, tss_selector, reason, |
| 3438 | has_error_code, error_code) == EMULATE_FAIL) { |
| 3439 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
| 3440 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; |
| 3441 | vcpu->run->internal.ndata = 0; |
| 3442 | return 0; |
| 3443 | } |
| 3444 | |
/* clear all local breakpoint enable flags (DR7 bits 0, 2, 4 and 6) */
vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
| 3447 | |
| 3448 | /* |
| 3449 | * TODO: What about debug traps on tss switch? |
| 3450 | * Are we supposed to inject them and update dr6? |
| 3451 | */ |
| 3452 | |
| 3453 | return 1; |
| 3454 | } |
| 3455 | |
| 3456 | static int handle_ept_violation(struct kvm_vcpu *vcpu) |
| 3457 | { |
| 3458 | unsigned long exit_qualification; |
| 3459 | gpa_t gpa; |
| 3460 | int gla_validity; |
| 3461 | |
| 3462 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
| 3463 | |
| 3464 | if (exit_qualification & (1 << 6)) { |
| 3465 | printk(KERN_ERR "EPT: GPA exceeds GAW!\n"); |
| 3466 | return -EINVAL; |
| 3467 | } |
| 3468 | |
| 3469 | gla_validity = (exit_qualification >> 7) & 0x3; |
| 3470 | if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) { |
| 3471 | printk(KERN_ERR "EPT: Handling EPT violation failed!\n"); |
| 3472 | printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n", |
| 3473 | (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS), |
| 3474 | vmcs_readl(GUEST_LINEAR_ADDRESS)); |
| 3475 | printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n", |
| 3476 | (long unsigned int)exit_qualification); |
| 3477 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
| 3478 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION; |
| 3479 | return 0; |
| 3480 | } |
| 3481 | |
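/*
* Bits 2:0 of the exit qualification encode the access that caused the
* violation (bit 0 = read, bit 1 = write, bit 2 = instruction fetch);
* only the read/write bits are forwarded as the fault error code.
*/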
| 3482 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
| 3483 | trace_kvm_page_fault(gpa, exit_qualification); |
| 3484 | return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3); |
| 3485 | } |
| 3486 | |
| 3487 | static u64 ept_rsvd_mask(u64 spte, int level) |
| 3488 | { |
| 3489 | int i; |
| 3490 | u64 mask = 0; |
| 3491 | |
| 3492 | for (i = 51; i > boot_cpu_data.x86_phys_bits; i--) |
| 3493 | mask |= (1ULL << i); |
| 3494 | |
| 3495 | if (level > 2) |
| 3496 | /* bits 7:3 reserved */ |
| 3497 | mask |= 0xf8; |
| 3498 | else if (level == 2) { |
| 3499 | if (spte & (1ULL << 7)) |
/* 2MB page, bits 20:12 reserved */
| 3501 | mask |= 0x1ff000; |
| 3502 | else |
| 3503 | /* bits 6:3 reserved */ |
| 3504 | mask |= 0x78; |
| 3505 | } |
| 3506 | |
| 3507 | return mask; |
| 3508 | } |
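/*
* Worked example (illustrative, assuming a CPU with 40 physical
* address bits): for a level-2 entry with bit 7 set (a 2MB mapping),
*
*	ept_rsvd_mask(1ULL << 7, 2) == 0x000ffe0000000000	/* bits 51:41 */
*				     | 0x00000000001ff000;	/* bits 20:12 */
*/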
| 3509 | |
| 3510 | static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte, |
| 3511 | int level) |
| 3512 | { |
| 3513 | printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level); |
| 3514 | |
| 3515 | /* 010b (write-only) */ |
| 3516 | WARN_ON((spte & 0x7) == 0x2); |
| 3517 | |
| 3518 | /* 110b (write/execute) */ |
| 3519 | WARN_ON((spte & 0x7) == 0x6); |
| 3520 | |
| 3521 | /* 100b (execute-only) and value not supported by logical processor */ |
| 3522 | if (!cpu_has_vmx_ept_execute_only()) |
| 3523 | WARN_ON((spte & 0x7) == 0x4); |
| 3524 | |
| 3525 | /* not 000b */ |
| 3526 | if ((spte & 0x7)) { |
| 3527 | u64 rsvd_bits = spte & ept_rsvd_mask(spte, level); |
| 3528 | |
| 3529 | if (rsvd_bits != 0) { |
| 3530 | printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n", |
| 3531 | __func__, rsvd_bits); |
| 3532 | WARN_ON(1); |
| 3533 | } |
| 3534 | |
| 3535 | if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) { |
| 3536 | u64 ept_mem_type = (spte & 0x38) >> 3; |
| 3537 | |
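/* the only valid EPT memory types are UC (0), WC (1), WT (4), WP (5) and WB (6) */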
| 3538 | if (ept_mem_type == 2 || ept_mem_type == 3 || |
| 3539 | ept_mem_type == 7) { |
| 3540 | printk(KERN_ERR "%s: ept_mem_type=0x%llx\n", |
| 3541 | __func__, ept_mem_type); |
| 3542 | WARN_ON(1); |
| 3543 | } |
| 3544 | } |
| 3545 | } |
| 3546 | } |
| 3547 | |
| 3548 | static int handle_ept_misconfig(struct kvm_vcpu *vcpu) |
| 3549 | { |
| 3550 | u64 sptes[4]; |
| 3551 | int nr_sptes, i; |
| 3552 | gpa_t gpa; |
| 3553 | |
| 3554 | gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS); |
| 3555 | |
| 3556 | printk(KERN_ERR "EPT: Misconfiguration.\n"); |
| 3557 | printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa); |
| 3558 | |
| 3559 | nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes); |
| 3560 | |
| 3561 | for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i) |
| 3562 | ept_misconfig_inspect_spte(vcpu, sptes[i-1], i); |
| 3563 | |
| 3564 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
| 3565 | vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG; |
| 3566 | |
| 3567 | return 0; |
| 3568 | } |
| 3569 | |
| 3570 | static int handle_nmi_window(struct kvm_vcpu *vcpu) |
| 3571 | { |
| 3572 | u32 cpu_based_vm_exec_control; |
| 3573 | |
| 3574 | /* clear pending NMI */ |
| 3575 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
| 3576 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING; |
| 3577 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
| 3578 | ++vcpu->stat.nmi_window_exits; |
| 3579 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
| 3580 | |
| 3581 | return 1; |
| 3582 | } |
| 3583 | |
| 3584 | static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
| 3585 | { |
| 3586 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 3587 | enum emulation_result err = EMULATE_DONE; |
| 3588 | int ret = 1; |
| 3589 | u32 cpu_exec_ctrl; |
| 3590 | bool intr_window_requested; |
| 3591 | |
| 3592 | cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
| 3593 | intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; |
| 3594 | |
| 3595 | while (!guest_state_valid(vcpu)) { |
| 3596 | if (intr_window_requested |
| 3597 | && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF)) |
| 3598 | return handle_interrupt_window(&vmx->vcpu); |
| 3599 | |
| 3600 | err = emulate_instruction(vcpu, 0, 0, 0); |
| 3601 | |
| 3602 | if (err == EMULATE_DO_MMIO) { |
| 3603 | ret = 0; |
| 3604 | goto out; |
| 3605 | } |
| 3606 | |
| 3607 | if (err != EMULATE_DONE) |
| 3608 | return 0; |
| 3609 | |
| 3610 | if (signal_pending(current)) |
| 3611 | goto out; |
| 3612 | if (need_resched()) |
| 3613 | schedule(); |
| 3614 | } |
| 3615 | |
| 3616 | vmx->emulation_required = 0; |
| 3617 | out: |
| 3618 | return ret; |
| 3619 | } |
| 3620 | |
/*
* The guest executed PAUSE while busy-waiting on a spinlock. We do not
* enable plain PAUSE exiting, so we only get here on CPUs with
* Pause-Loop Exiting (PLE).
*/
| 3625 | static int handle_pause(struct kvm_vcpu *vcpu) |
| 3626 | { |
| 3627 | skip_emulated_instruction(vcpu); |
| 3628 | kvm_vcpu_on_spin(vcpu); |
| 3629 | |
| 3630 | return 1; |
| 3631 | } |
| 3632 | |
| 3633 | static int handle_invalid_op(struct kvm_vcpu *vcpu) |
| 3634 | { |
| 3635 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 3636 | return 1; |
| 3637 | } |
| 3638 | |
| 3639 | /* |
| 3640 | * The exit handlers return 1 if the exit was handled fully and guest execution |
| 3641 | * may resume. Otherwise they set the kvm_run parameter to indicate what needs |
| 3642 | * to be done to userspace and return 0. |
| 3643 | */ |
| 3644 | static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { |
| 3645 | [EXIT_REASON_EXCEPTION_NMI] = handle_exception, |
| 3646 | [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt, |
| 3647 | [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault, |
| 3648 | [EXIT_REASON_NMI_WINDOW] = handle_nmi_window, |
| 3649 | [EXIT_REASON_IO_INSTRUCTION] = handle_io, |
| 3650 | [EXIT_REASON_CR_ACCESS] = handle_cr, |
| 3651 | [EXIT_REASON_DR_ACCESS] = handle_dr, |
| 3652 | [EXIT_REASON_CPUID] = handle_cpuid, |
| 3653 | [EXIT_REASON_MSR_READ] = handle_rdmsr, |
| 3654 | [EXIT_REASON_MSR_WRITE] = handle_wrmsr, |
| 3655 | [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window, |
| 3656 | [EXIT_REASON_HLT] = handle_halt, |
| 3657 | [EXIT_REASON_INVD] = handle_invd, |
| 3658 | [EXIT_REASON_INVLPG] = handle_invlpg, |
| 3659 | [EXIT_REASON_VMCALL] = handle_vmcall, |
| 3660 | [EXIT_REASON_VMCLEAR] = handle_vmx_insn, |
| 3661 | [EXIT_REASON_VMLAUNCH] = handle_vmx_insn, |
| 3662 | [EXIT_REASON_VMPTRLD] = handle_vmx_insn, |
| 3663 | [EXIT_REASON_VMPTRST] = handle_vmx_insn, |
| 3664 | [EXIT_REASON_VMREAD] = handle_vmx_insn, |
| 3665 | [EXIT_REASON_VMRESUME] = handle_vmx_insn, |
| 3666 | [EXIT_REASON_VMWRITE] = handle_vmx_insn, |
| 3667 | [EXIT_REASON_VMOFF] = handle_vmx_insn, |
| 3668 | [EXIT_REASON_VMON] = handle_vmx_insn, |
| 3669 | [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold, |
| 3670 | [EXIT_REASON_APIC_ACCESS] = handle_apic_access, |
| 3671 | [EXIT_REASON_WBINVD] = handle_wbinvd, |
| 3672 | [EXIT_REASON_XSETBV] = handle_xsetbv, |
| 3673 | [EXIT_REASON_TASK_SWITCH] = handle_task_switch, |
| 3674 | [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check, |
| 3675 | [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation, |
| 3676 | [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig, |
| 3677 | [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, |
| 3678 | [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, |
| 3679 | [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, |
| 3680 | }; |
| 3681 | |
| 3682 | static const int kvm_vmx_max_exit_handlers = |
| 3683 | ARRAY_SIZE(kvm_vmx_exit_handlers); |
| 3684 | |
| 3685 | /* |
| 3686 | * The guest has exited. See if we can fix it or if we need userspace |
| 3687 | * assistance. |
| 3688 | */ |
| 3689 | static int vmx_handle_exit(struct kvm_vcpu *vcpu) |
| 3690 | { |
| 3691 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 3692 | u32 exit_reason = vmx->exit_reason; |
| 3693 | u32 vectoring_info = vmx->idt_vectoring_info; |
| 3694 | |
| 3695 | trace_kvm_exit(exit_reason, vcpu); |
| 3696 | |
| 3697 | /* If guest state is invalid, start emulating */ |
| 3698 | if (vmx->emulation_required && emulate_invalid_guest_state) |
| 3699 | return handle_invalid_guest_state(vcpu); |
| 3700 | |
/* CR3 accesses don't cause a VM exit when EPT is enabled and the
* guest is in paging mode, so we need to sync with the guest's
* real CR3. */
| 3703 | if (enable_ept && is_paging(vcpu)) |
| 3704 | vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); |
| 3705 | |
| 3706 | if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) { |
| 3707 | vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 3708 | vcpu->run->fail_entry.hardware_entry_failure_reason |
| 3709 | = exit_reason; |
| 3710 | return 0; |
| 3711 | } |
| 3712 | |
| 3713 | if (unlikely(vmx->fail)) { |
| 3714 | vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
| 3715 | vcpu->run->fail_entry.hardware_entry_failure_reason |
| 3716 | = vmcs_read32(VM_INSTRUCTION_ERROR); |
| 3717 | return 0; |
| 3718 | } |
| 3719 | |
| 3720 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && |
| 3721 | (exit_reason != EXIT_REASON_EXCEPTION_NMI && |
| 3722 | exit_reason != EXIT_REASON_EPT_VIOLATION && |
| 3723 | exit_reason != EXIT_REASON_TASK_SWITCH)) |
| 3724 | printk(KERN_WARNING "%s: unexpected, valid vectoring info " |
| 3725 | "(0x%x) and exit reason is 0x%x\n", |
| 3726 | __func__, vectoring_info, exit_reason); |
| 3727 | |
| 3728 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) { |
| 3729 | if (vmx_interrupt_allowed(vcpu)) { |
| 3730 | vmx->soft_vnmi_blocked = 0; |
| 3731 | } else if (vmx->vnmi_blocked_time > 1000000000LL && |
| 3732 | vcpu->arch.nmi_pending) { |
| 3733 | /* |
* This CPU doesn't support tracking the end of an
* NMI-blocked window if the guest runs with IRQs
* disabled. So we pull the trigger after 1 s of
* futile waiting, but inform the user about this.
| 3738 | */ |
| 3739 | printk(KERN_WARNING "%s: Breaking out of NMI-blocked " |
| 3740 | "state on VCPU %d after 1 s timeout\n", |
| 3741 | __func__, vcpu->vcpu_id); |
| 3742 | vmx->soft_vnmi_blocked = 0; |
| 3743 | } |
| 3744 | } |
| 3745 | |
| 3746 | if (exit_reason < kvm_vmx_max_exit_handlers |
| 3747 | && kvm_vmx_exit_handlers[exit_reason]) |
| 3748 | return kvm_vmx_exit_handlers[exit_reason](vcpu); |
| 3749 | else { |
| 3750 | vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; |
| 3751 | vcpu->run->hw.hardware_exit_reason = exit_reason; |
| 3752 | } |
| 3753 | return 0; |
| 3754 | } |
| 3755 | |
| 3756 | static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
| 3757 | { |
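/*
* A "TPR below threshold" exit is taken when the guest drops its TPR
* below the programmed threshold. If no interrupt is pending
* (irr == -1) or the pending priority is already above the TPR, the
* interrupt is deliverable now and no intercept is needed; otherwise
* arm the threshold at the pending priority so we exit, and can
* inject, as soon as the guest unmasks it.
*/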
| 3758 | if (irr == -1 || tpr < irr) { |
| 3759 | vmcs_write32(TPR_THRESHOLD, 0); |
| 3760 | return; |
| 3761 | } |
| 3762 | |
| 3763 | vmcs_write32(TPR_THRESHOLD, irr); |
| 3764 | } |
| 3765 | |
| 3766 | static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) |
| 3767 | { |
| 3768 | u32 exit_intr_info = vmx->exit_intr_info; |
| 3769 | |
| 3770 | /* Handle machine checks before interrupts are enabled */ |
| 3771 | if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY) |
| 3772 | || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI |
| 3773 | && is_machine_check(exit_intr_info))) |
| 3774 | kvm_machine_check(); |
| 3775 | |
| 3776 | /* We need to handle NMIs before interrupts are enabled */ |
| 3777 | if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && |
| 3778 | (exit_intr_info & INTR_INFO_VALID_MASK)) { |
| 3779 | kvm_before_handle_nmi(&vmx->vcpu); |
| 3780 | asm("int $2"); |
| 3781 | kvm_after_handle_nmi(&vmx->vcpu); |
| 3782 | } |
| 3783 | } |
| 3784 | |
| 3785 | static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) |
| 3786 | { |
| 3787 | u32 exit_intr_info = vmx->exit_intr_info; |
| 3788 | bool unblock_nmi; |
| 3789 | u8 vector; |
| 3790 | bool idtv_info_valid; |
| 3791 | |
| 3792 | idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
| 3793 | |
| 3794 | if (cpu_has_virtual_nmis()) { |
| 3795 | unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0; |
| 3796 | vector = exit_intr_info & INTR_INFO_VECTOR_MASK; |
| 3797 | /* |
| 3798 | * SDM 3: 27.7.1.2 (September 2008) |
| 3799 | * Re-set bit "block by NMI" before VM entry if vmexit caused by |
| 3800 | * a guest IRET fault. |
| 3801 | * SDM 3: 23.2.2 (September 2008) |
| 3802 | * Bit 12 is undefined in any of the following cases: |
| 3803 | * If the VM exit sets the valid bit in the IDT-vectoring |
| 3804 | * information field. |
| 3805 | * If the VM exit is due to a double fault. |
| 3806 | */ |
| 3807 | if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi && |
| 3808 | vector != DF_VECTOR && !idtv_info_valid) |
| 3809 | vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, |
| 3810 | GUEST_INTR_STATE_NMI); |
| 3811 | } else if (unlikely(vmx->soft_vnmi_blocked)) |
| 3812 | vmx->vnmi_blocked_time += |
| 3813 | ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); |
| 3814 | } |
| 3815 | |
| 3816 | static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, |
| 3817 | u32 idt_vectoring_info, |
| 3818 | int instr_len_field, |
| 3819 | int error_code_field) |
| 3820 | { |
| 3821 | u8 vector; |
| 3822 | int type; |
| 3823 | bool idtv_info_valid; |
| 3824 | |
| 3825 | idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; |
| 3826 | |
| 3827 | vmx->vcpu.arch.nmi_injected = false; |
| 3828 | kvm_clear_exception_queue(&vmx->vcpu); |
| 3829 | kvm_clear_interrupt_queue(&vmx->vcpu); |
| 3830 | |
| 3831 | if (!idtv_info_valid) |
| 3832 | return; |
| 3833 | |
| 3834 | kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); |
| 3835 | |
| 3836 | vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; |
| 3837 | type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; |
| 3838 | |
| 3839 | switch (type) { |
| 3840 | case INTR_TYPE_NMI_INTR: |
| 3841 | vmx->vcpu.arch.nmi_injected = true; |
| 3842 | /* |
| 3843 | * SDM 3: 27.7.1.2 (September 2008) |
* Clear bit "block by NMI" before VM entry if an NMI
* delivery faulted.
| 3846 | */ |
| 3847 | vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO, |
| 3848 | GUEST_INTR_STATE_NMI); |
| 3849 | break; |
| 3850 | case INTR_TYPE_SOFT_EXCEPTION: |
| 3851 | vmx->vcpu.arch.event_exit_inst_len = |
| 3852 | vmcs_read32(instr_len_field); |
| 3853 | /* fall through */ |
| 3854 | case INTR_TYPE_HARD_EXCEPTION: |
| 3855 | if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { |
| 3856 | u32 err = vmcs_read32(error_code_field); |
| 3857 | kvm_queue_exception_e(&vmx->vcpu, vector, err); |
| 3858 | } else |
| 3859 | kvm_queue_exception(&vmx->vcpu, vector); |
| 3860 | break; |
| 3861 | case INTR_TYPE_SOFT_INTR: |
| 3862 | vmx->vcpu.arch.event_exit_inst_len = |
| 3863 | vmcs_read32(instr_len_field); |
| 3864 | /* fall through */ |
| 3865 | case INTR_TYPE_EXT_INTR: |
| 3866 | kvm_queue_interrupt(&vmx->vcpu, vector, |
| 3867 | type == INTR_TYPE_SOFT_INTR); |
| 3868 | break; |
| 3869 | default: |
| 3870 | break; |
| 3871 | } |
| 3872 | } |
| 3873 | |
| 3874 | static void vmx_complete_interrupts(struct vcpu_vmx *vmx) |
| 3875 | { |
| 3876 | __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info, |
| 3877 | VM_EXIT_INSTRUCTION_LEN, |
| 3878 | IDT_VECTORING_ERROR_CODE); |
| 3879 | } |
| 3880 | |
| 3881 | static void vmx_cancel_injection(struct kvm_vcpu *vcpu) |
| 3882 | { |
| 3883 | __vmx_complete_interrupts(to_vmx(vcpu), |
| 3884 | vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), |
| 3885 | VM_ENTRY_INSTRUCTION_LEN, |
| 3886 | VM_ENTRY_EXCEPTION_ERROR_CODE); |
| 3887 | |
| 3888 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); |
| 3889 | } |
| 3890 | |
| 3891 | #ifdef CONFIG_X86_64 |
| 3892 | #define R "r" |
| 3893 | #define Q "q" |
| 3894 | #else |
| 3895 | #define R "e" |
| 3896 | #define Q "l" |
| 3897 | #endif |
| 3898 | |
| 3899 | /* |
| 3900 | * We put this into a separate noinline function to prevent the compiler |
| 3901 | * from duplicating the code. This is needed because this code |
* uses non-local labels that cannot be duplicated.
| 3903 | * Do not put any flow control into this function. |
| 3904 | * Better would be to put this whole monstrosity into a .S file. |
| 3905 | */ |
| 3906 | static void noinline do_vmx_vcpu_run(struct kvm_vcpu *vcpu) |
| 3907 | { |
| 3908 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 3909 | asm volatile( |
| 3910 | /* Store host registers */ |
| 3911 | "push %%"R"dx; push %%"R"bp;" |
| 3912 | "push %%"R"cx \n\t" |
| 3913 | "cmp %%"R"sp, %c[host_rsp](%0) \n\t" |
| 3914 | "je 1f \n\t" |
| 3915 | "mov %%"R"sp, %c[host_rsp](%0) \n\t" |
| 3916 | __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t" |
| 3917 | "1: \n\t" |
| 3918 | /* Reload cr2 if changed */ |
| 3919 | "mov %c[cr2](%0), %%"R"ax \n\t" |
| 3920 | "mov %%cr2, %%"R"dx \n\t" |
| 3921 | "cmp %%"R"ax, %%"R"dx \n\t" |
| 3922 | "je 2f \n\t" |
| 3923 | "mov %%"R"ax, %%cr2 \n\t" |
| 3924 | "2: \n\t" |
/* Check if vmlaunch or vmresume is needed */
| 3926 | "cmpl $0, %c[launched](%0) \n\t" |
| 3927 | /* Load guest registers. Don't clobber flags. */ |
| 3928 | "mov %c[rax](%0), %%"R"ax \n\t" |
| 3929 | "mov %c[rbx](%0), %%"R"bx \n\t" |
| 3930 | "mov %c[rdx](%0), %%"R"dx \n\t" |
| 3931 | "mov %c[rsi](%0), %%"R"si \n\t" |
| 3932 | "mov %c[rdi](%0), %%"R"di \n\t" |
| 3933 | "mov %c[rbp](%0), %%"R"bp \n\t" |
| 3934 | #ifdef CONFIG_X86_64 |
| 3935 | "mov %c[r8](%0), %%r8 \n\t" |
| 3936 | "mov %c[r9](%0), %%r9 \n\t" |
| 3937 | "mov %c[r10](%0), %%r10 \n\t" |
| 3938 | "mov %c[r11](%0), %%r11 \n\t" |
| 3939 | "mov %c[r12](%0), %%r12 \n\t" |
| 3940 | "mov %c[r13](%0), %%r13 \n\t" |
| 3941 | "mov %c[r14](%0), %%r14 \n\t" |
| 3942 | "mov %c[r15](%0), %%r15 \n\t" |
| 3943 | #endif |
| 3944 | "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */ |
| 3945 | |
| 3946 | /* Enter guest mode */ |
| 3947 | "jne .Llaunched \n\t" |
| 3948 | __ex(ASM_VMX_VMLAUNCH) "\n\t" |
| 3949 | "jmp .Lkvm_vmx_return \n\t" |
| 3950 | ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" |
| 3951 | ".Lkvm_vmx_return: " |
| 3952 | /* Save guest registers, load host registers, keep flags */ |
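/* RCX still holds the guest's value; the xchg below swaps it with the
vmx pointer that was pushed on the stack at entry */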
| 3953 | "xchg %0, (%%"R"sp) \n\t" |
| 3954 | "mov %%"R"ax, %c[rax](%0) \n\t" |
| 3955 | "mov %%"R"bx, %c[rbx](%0) \n\t" |
| 3956 | "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t" |
| 3957 | "mov %%"R"dx, %c[rdx](%0) \n\t" |
| 3958 | "mov %%"R"si, %c[rsi](%0) \n\t" |
| 3959 | "mov %%"R"di, %c[rdi](%0) \n\t" |
| 3960 | "mov %%"R"bp, %c[rbp](%0) \n\t" |
| 3961 | #ifdef CONFIG_X86_64 |
| 3962 | "mov %%r8, %c[r8](%0) \n\t" |
| 3963 | "mov %%r9, %c[r9](%0) \n\t" |
| 3964 | "mov %%r10, %c[r10](%0) \n\t" |
| 3965 | "mov %%r11, %c[r11](%0) \n\t" |
| 3966 | "mov %%r12, %c[r12](%0) \n\t" |
| 3967 | "mov %%r13, %c[r13](%0) \n\t" |
| 3968 | "mov %%r14, %c[r14](%0) \n\t" |
| 3969 | "mov %%r15, %c[r15](%0) \n\t" |
| 3970 | #endif |
| 3971 | "mov %%cr2, %%"R"ax \n\t" |
| 3972 | "mov %%"R"ax, %c[cr2](%0) \n\t" |
| 3973 | |
| 3974 | "pop %%"R"bp; pop %%"R"bp; pop %%"R"dx \n\t" |
| 3975 | "setbe %c[fail](%0) \n\t" |
| 3976 | : : "c"(vmx), "d"((unsigned long)HOST_RSP), |
| 3977 | [launched]"i"(offsetof(struct vcpu_vmx, launched)), |
| 3978 | [fail]"i"(offsetof(struct vcpu_vmx, fail)), |
| 3979 | [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)), |
| 3980 | [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])), |
| 3981 | [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])), |
| 3982 | [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])), |
| 3983 | [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])), |
| 3984 | [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])), |
| 3985 | [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])), |
| 3986 | [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])), |
| 3987 | #ifdef CONFIG_X86_64 |
| 3988 | [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])), |
| 3989 | [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])), |
| 3990 | [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])), |
| 3991 | [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])), |
| 3992 | [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])), |
| 3993 | [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])), |
| 3994 | [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])), |
| 3995 | [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), |
| 3996 | #endif |
| 3997 | [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) |
| 3998 | : "cc", "memory" |
| 3999 | , R"ax", R"bx", R"di", R"si" |
| 4000 | #ifdef CONFIG_X86_64 |
| 4001 | , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" |
| 4002 | #endif |
| 4003 | ); |
| 4004 | } |
| 4005 | |
| 4006 | static void vmx_vcpu_run(struct kvm_vcpu *vcpu) |
| 4007 | { |
| 4008 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 4009 | |
| 4010 | /* Record the guest's net vcpu time for enforced NMI injections. */ |
| 4011 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) |
| 4012 | vmx->entry_time = ktime_get(); |
| 4013 | |
/* Don't enter VMX if guest state is invalid; let the exit handler
* start emulation until we arrive back at a valid state. */
| 4016 | if (vmx->emulation_required && emulate_invalid_guest_state) |
| 4017 | return; |
| 4018 | |
| 4019 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) |
| 4020 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); |
| 4021 | if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty)) |
| 4022 | vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); |
| 4023 | |
| 4024 | /* When single-stepping over STI and MOV SS, we must clear the |
| 4025 | * corresponding interruptibility bits in the guest state. Otherwise |
| 4026 | * vmentry fails as it then expects bit 14 (BS) in pending debug |
| 4027 | * exceptions being set, but that's not correct for the guest debugging |
| 4028 | * case. */ |
| 4029 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) |
| 4030 | vmx_set_interrupt_shadow(vcpu, 0); |
| 4031 | |
| 4032 | do_vmx_vcpu_run(vcpu); |
| 4033 | |
| 4034 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) |
| 4035 | | (1 << VCPU_EXREG_PDPTR)); |
| 4036 | vcpu->arch.regs_dirty = 0; |
| 4037 | |
| 4038 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); |
| 4039 | |
| 4040 | asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); |
| 4041 | vmx->launched = 1; |
| 4042 | |
| 4043 | vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); |
| 4044 | vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
| 4045 | |
| 4046 | vmx_complete_atomic_exit(vmx); |
| 4047 | vmx_recover_nmi_blocking(vmx); |
| 4048 | vmx_complete_interrupts(vmx); |
| 4049 | } |
| 4050 | |
| 4051 | #undef R |
| 4052 | #undef Q |
| 4053 | |
| 4054 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) |
| 4055 | { |
| 4056 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 4057 | |
| 4058 | if (vmx->vmcs) { |
| 4059 | vcpu_clear(vmx); |
| 4060 | free_vmcs(vmx->vmcs); |
| 4061 | vmx->vmcs = NULL; |
| 4062 | } |
| 4063 | } |
| 4064 | |
| 4065 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
| 4066 | { |
| 4067 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 4068 | |
| 4069 | free_vpid(vmx); |
| 4070 | vmx_free_vmcs(vcpu); |
| 4071 | kfree(vmx->guest_msrs); |
| 4072 | kvm_vcpu_uninit(vcpu); |
| 4073 | kmem_cache_free(kvm_vcpu_cache, vmx); |
| 4074 | } |
| 4075 | |
| 4076 | static inline void vmcs_init(struct vmcs *vmcs) |
| 4077 | { |
| 4078 | u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id())); |
| 4079 | |
| 4080 | if (!vmm_exclusive) |
| 4081 | kvm_cpu_vmxon(phys_addr); |
| 4082 | |
| 4083 | vmcs_clear(vmcs); |
| 4084 | |
| 4085 | if (!vmm_exclusive) |
| 4086 | kvm_cpu_vmxoff(); |
| 4087 | } |
| 4088 | |
| 4089 | static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) |
| 4090 | { |
| 4091 | int err; |
| 4092 | struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
| 4093 | int cpu; |
| 4094 | |
| 4095 | if (!vmx) |
| 4096 | return ERR_PTR(-ENOMEM); |
| 4097 | |
| 4098 | allocate_vpid(vmx); |
| 4099 | |
| 4100 | err = kvm_vcpu_init(&vmx->vcpu, kvm, id); |
| 4101 | if (err) |
| 4102 | goto free_vcpu; |
| 4103 | |
| 4104 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| 4105 | if (!vmx->guest_msrs) { |
| 4106 | err = -ENOMEM; |
| 4107 | goto uninit_vcpu; |
| 4108 | } |
| 4109 | |
| 4110 | vmx->vmcs = alloc_vmcs(); |
| 4111 | if (!vmx->vmcs) |
| 4112 | goto free_msrs; |
| 4113 | |
| 4114 | vmcs_init(vmx->vmcs); |
| 4115 | |
| 4116 | cpu = get_cpu(); |
| 4117 | vmx_vcpu_load(&vmx->vcpu, cpu); |
| 4118 | vmx->vcpu.cpu = cpu; |
| 4119 | err = vmx_vcpu_setup(vmx); |
| 4120 | vmx_vcpu_put(&vmx->vcpu); |
| 4121 | put_cpu(); |
| 4122 | if (err) |
| 4123 | goto free_vmcs; |
| 4124 | if (vm_need_virtualize_apic_accesses(kvm)) |
| 4125 | if (alloc_apic_access_page(kvm) != 0) |
| 4126 | goto free_vmcs; |
| 4127 | |
| 4128 | if (enable_ept) { |
| 4129 | if (!kvm->arch.ept_identity_map_addr) |
| 4130 | kvm->arch.ept_identity_map_addr = |
| 4131 | VMX_EPT_IDENTITY_PAGETABLE_ADDR; |
| 4132 | if (alloc_identity_pagetable(kvm) != 0) |
| 4133 | goto free_vmcs; |
| 4134 | } |
| 4135 | |
| 4136 | return &vmx->vcpu; |
| 4137 | |
| 4138 | free_vmcs: |
| 4139 | free_vmcs(vmx->vmcs); |
| 4140 | free_msrs: |
| 4141 | kfree(vmx->guest_msrs); |
| 4142 | uninit_vcpu: |
| 4143 | kvm_vcpu_uninit(&vmx->vcpu); |
| 4144 | free_vcpu: |
| 4145 | free_vpid(vmx); |
| 4146 | kmem_cache_free(kvm_vcpu_cache, vmx); |
| 4147 | return ERR_PTR(err); |
| 4148 | } |
| 4149 | |
| 4150 | static void __init vmx_check_processor_compat(void *rtn) |
| 4151 | { |
| 4152 | struct vmcs_config vmcs_conf; |
| 4153 | |
| 4154 | *(int *)rtn = 0; |
| 4155 | if (setup_vmcs_config(&vmcs_conf) < 0) |
| 4156 | *(int *)rtn = -EIO; |
| 4157 | if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) { |
| 4158 | printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n", |
| 4159 | smp_processor_id()); |
| 4160 | *(int *)rtn = -EIO; |
| 4161 | } |
| 4162 | } |
| 4163 | |
| 4164 | static int get_ept_level(void) |
| 4165 | { |
| 4166 | return VMX_EPT_DEFAULT_GAW + 1; |
| 4167 | } |
| 4168 | |
| 4169 | static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) |
| 4170 | { |
| 4171 | u64 ret; |
| 4172 | |
/* For the VT-d and EPT combination:
* 1. MMIO: always map as UC.
* 2. EPT with VT-d:
*   a. VT-d without the snooping control feature: the result can't be
*      guaranteed, so try to trust the guest's memory type.
*   b. VT-d with the snooping control feature: snooping control in the
*      VT-d engine guarantees cache correctness, so set it to WB to
*      stay consistent with the host (same as item 3).
* 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
*    consistent with the host MTRRs.
*/
| 4184 | if (is_mmio) |
| 4185 | ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT; |
| 4186 | else if (vcpu->kvm->arch.iommu_domain && |
| 4187 | !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)) |
| 4188 | ret = kvm_get_guest_memory_type(vcpu, gfn) << |
| 4189 | VMX_EPT_MT_EPTE_SHIFT; |
| 4190 | else |
| 4191 | ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) |
| 4192 | | VMX_EPT_IPAT_BIT; |
| 4193 | |
| 4194 | return ret; |
| 4195 | } |
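/*
* Example (illustrative): an MMIO gfn is mapped UC (type 0 in EPTE
* bits 5:3); the plain-EPT case is mapped WB (6) with IPAT set, so the
* guest's own memory-type settings are ignored for that mapping.
*/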
| 4196 | |
| 4197 | #define _ER(x) { EXIT_REASON_##x, #x } |
| 4198 | |
| 4199 | static const struct trace_print_flags vmx_exit_reasons_str[] = { |
| 4200 | _ER(EXCEPTION_NMI), |
| 4201 | _ER(EXTERNAL_INTERRUPT), |
| 4202 | _ER(TRIPLE_FAULT), |
| 4203 | _ER(PENDING_INTERRUPT), |
| 4204 | _ER(NMI_WINDOW), |
| 4205 | _ER(TASK_SWITCH), |
| 4206 | _ER(CPUID), |
| 4207 | _ER(HLT), |
| 4208 | _ER(INVLPG), |
| 4209 | _ER(RDPMC), |
| 4210 | _ER(RDTSC), |
| 4211 | _ER(VMCALL), |
| 4212 | _ER(VMCLEAR), |
| 4213 | _ER(VMLAUNCH), |
| 4214 | _ER(VMPTRLD), |
| 4215 | _ER(VMPTRST), |
| 4216 | _ER(VMREAD), |
| 4217 | _ER(VMRESUME), |
| 4218 | _ER(VMWRITE), |
| 4219 | _ER(VMOFF), |
| 4220 | _ER(VMON), |
| 4221 | _ER(CR_ACCESS), |
| 4222 | _ER(DR_ACCESS), |
| 4223 | _ER(IO_INSTRUCTION), |
| 4224 | _ER(MSR_READ), |
| 4225 | _ER(MSR_WRITE), |
| 4226 | _ER(MWAIT_INSTRUCTION), |
| 4227 | _ER(MONITOR_INSTRUCTION), |
| 4228 | _ER(PAUSE_INSTRUCTION), |
| 4229 | _ER(MCE_DURING_VMENTRY), |
| 4230 | _ER(TPR_BELOW_THRESHOLD), |
| 4231 | _ER(APIC_ACCESS), |
| 4232 | _ER(EPT_VIOLATION), |
| 4233 | _ER(EPT_MISCONFIG), |
| 4234 | _ER(WBINVD), |
| 4235 | { -1, NULL } |
| 4236 | }; |
| 4237 | |
| 4238 | #undef _ER |
| 4239 | |
| 4240 | static int vmx_get_lpage_level(void) |
| 4241 | { |
| 4242 | if (enable_ept && !cpu_has_vmx_ept_1g_page()) |
| 4243 | return PT_DIRECTORY_LEVEL; |
| 4244 | else |
/* Both shadow paging and EPT with 1GB support can map 1GB pages */
| 4246 | return PT_PDPE_LEVEL; |
| 4247 | } |
| 4248 | |
| 4249 | static void vmx_cpuid_update(struct kvm_vcpu *vcpu) |
| 4250 | { |
| 4251 | struct kvm_cpuid_entry2 *best; |
| 4252 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 4253 | u32 exec_control; |
| 4254 | |
| 4255 | vmx->rdtscp_enabled = false; |
| 4256 | if (vmx_rdtscp_supported()) { |
| 4257 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); |
| 4258 | if (exec_control & SECONDARY_EXEC_RDTSCP) { |
| 4259 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
| 4260 | if (best && (best->edx & bit(X86_FEATURE_RDTSCP))) |
| 4261 | vmx->rdtscp_enabled = true; |
| 4262 | else { |
| 4263 | exec_control &= ~SECONDARY_EXEC_RDTSCP; |
| 4264 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, |
| 4265 | exec_control); |
| 4266 | } |
| 4267 | } |
| 4268 | } |
| 4269 | } |
| 4270 | |
| 4271 | static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) |
| 4272 | { |
| 4273 | } |
| 4274 | |
| 4275 | static struct kvm_x86_ops vmx_x86_ops = { |
| 4276 | .cpu_has_kvm_support = cpu_has_kvm_support, |
| 4277 | .disabled_by_bios = vmx_disabled_by_bios, |
| 4278 | .hardware_setup = hardware_setup, |
| 4279 | .hardware_unsetup = hardware_unsetup, |
| 4280 | .check_processor_compatibility = vmx_check_processor_compat, |
| 4281 | .hardware_enable = hardware_enable, |
| 4282 | .hardware_disable = hardware_disable, |
| 4283 | .cpu_has_accelerated_tpr = report_flexpriority, |
| 4284 | |
| 4285 | .vcpu_create = vmx_create_vcpu, |
| 4286 | .vcpu_free = vmx_free_vcpu, |
| 4287 | .vcpu_reset = vmx_vcpu_reset, |
| 4288 | |
| 4289 | .prepare_guest_switch = vmx_save_host_state, |
| 4290 | .vcpu_load = vmx_vcpu_load, |
| 4291 | .vcpu_put = vmx_vcpu_put, |
| 4292 | |
| 4293 | .set_guest_debug = set_guest_debug, |
| 4294 | .get_msr = vmx_get_msr, |
| 4295 | .set_msr = vmx_set_msr, |
| 4296 | .get_segment_base = vmx_get_segment_base, |
| 4297 | .get_segment = vmx_get_segment, |
| 4298 | .set_segment = vmx_set_segment, |
| 4299 | .get_cpl = vmx_get_cpl, |
| 4300 | .get_cs_db_l_bits = vmx_get_cs_db_l_bits, |
| 4301 | .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits, |
| 4302 | .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits, |
| 4303 | .set_cr0 = vmx_set_cr0, |
| 4304 | .set_cr3 = vmx_set_cr3, |
| 4305 | .set_cr4 = vmx_set_cr4, |
| 4306 | .set_efer = vmx_set_efer, |
| 4307 | .get_idt = vmx_get_idt, |
| 4308 | .set_idt = vmx_set_idt, |
| 4309 | .get_gdt = vmx_get_gdt, |
| 4310 | .set_gdt = vmx_set_gdt, |
| 4311 | .set_dr7 = vmx_set_dr7, |
| 4312 | .cache_reg = vmx_cache_reg, |
| 4313 | .get_rflags = vmx_get_rflags, |
| 4314 | .set_rflags = vmx_set_rflags, |
| 4315 | .fpu_activate = vmx_fpu_activate, |
| 4316 | .fpu_deactivate = vmx_fpu_deactivate, |
| 4317 | |
| 4318 | .tlb_flush = vmx_flush_tlb, |
| 4319 | |
| 4320 | .run = vmx_vcpu_run, |
| 4321 | .handle_exit = vmx_handle_exit, |
| 4322 | .skip_emulated_instruction = skip_emulated_instruction, |
| 4323 | .set_interrupt_shadow = vmx_set_interrupt_shadow, |
| 4324 | .get_interrupt_shadow = vmx_get_interrupt_shadow, |
| 4325 | .patch_hypercall = vmx_patch_hypercall, |
| 4326 | .set_irq = vmx_inject_irq, |
| 4327 | .set_nmi = vmx_inject_nmi, |
| 4328 | .queue_exception = vmx_queue_exception, |
| 4329 | .cancel_injection = vmx_cancel_injection, |
| 4330 | .interrupt_allowed = vmx_interrupt_allowed, |
| 4331 | .nmi_allowed = vmx_nmi_allowed, |
| 4332 | .get_nmi_mask = vmx_get_nmi_mask, |
| 4333 | .set_nmi_mask = vmx_set_nmi_mask, |
| 4334 | .enable_nmi_window = enable_nmi_window, |
| 4335 | .enable_irq_window = enable_irq_window, |
| 4336 | .update_cr8_intercept = update_cr8_intercept, |
| 4337 | |
| 4338 | .set_tss_addr = vmx_set_tss_addr, |
| 4339 | .get_tdp_level = get_ept_level, |
| 4340 | .get_mt_mask = vmx_get_mt_mask, |
| 4341 | |
| 4342 | .exit_reasons_str = vmx_exit_reasons_str, |
| 4343 | .get_lpage_level = vmx_get_lpage_level, |
| 4344 | |
| 4345 | .cpuid_update = vmx_cpuid_update, |
| 4346 | |
| 4347 | .rdtscp_supported = vmx_rdtscp_supported, |
| 4348 | |
| 4349 | .set_supported_cpuid = vmx_set_supported_cpuid, |
| 4350 | |
| 4351 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
| 4352 | |
| 4353 | .write_tsc_offset = vmx_write_tsc_offset, |
| 4354 | .adjust_tsc_offset = vmx_adjust_tsc_offset, |
| 4355 | |
| 4356 | .set_tdp_cr3 = vmx_set_cr3, |
| 4357 | }; |
| 4358 | |
| 4359 | static int __init vmx_init(void) |
| 4360 | { |
| 4361 | int r, i; |
| 4362 | |
| 4363 | rdmsrl_safe(MSR_EFER, &host_efer); |
| 4364 | |
| 4365 | for (i = 0; i < NR_VMX_MSR; ++i) |
| 4366 | kvm_define_shared_msr(i, vmx_msr_index[i]); |
| 4367 | |
| 4368 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); |
| 4369 | if (!vmx_io_bitmap_a) |
| 4370 | return -ENOMEM; |
| 4371 | |
| 4372 | vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL); |
| 4373 | if (!vmx_io_bitmap_b) { |
| 4374 | r = -ENOMEM; |
| 4375 | goto out; |
| 4376 | } |
| 4377 | |
| 4378 | vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL); |
| 4379 | if (!vmx_msr_bitmap_legacy) { |
| 4380 | r = -ENOMEM; |
| 4381 | goto out1; |
| 4382 | } |
| 4383 | |
| 4384 | vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL); |
| 4385 | if (!vmx_msr_bitmap_longmode) { |
| 4386 | r = -ENOMEM; |
| 4387 | goto out2; |
| 4388 | } |
| 4389 | |
| 4390 | /* |
| 4391 | * Allow direct access to the PC debug port (it is often used for I/O |
| 4392 | * delays, but the vmexits simply slow things down). |
| 4393 | */ |
| 4394 | memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE); |
| 4395 | clear_bit(0x80, vmx_io_bitmap_a); |
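/*
* In the I/O bitmaps a set bit means "exit on access"; clearing bit
* 0x80 above lets the guest hit the debug port directly.
*/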
| 4396 | |
| 4397 | memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); |
| 4398 | |
| 4399 | memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); |
| 4400 | memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); |
| 4401 | |
| 4402 | set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ |
| 4403 | |
| 4404 | r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), |
| 4405 | __alignof__(struct vcpu_vmx), THIS_MODULE); |
| 4406 | if (r) |
| 4407 | goto out3; |
| 4408 | |
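/*
* These MSRs are switched via the VMCS or saved/restored by KVM around
* vcpu runs, so the guest can be allowed to read and write them without
* exiting; KERNEL_GS_BASE is passed through for long-mode guests only
* (the "true" argument).
*/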
| 4409 | vmx_disable_intercept_for_msr(MSR_FS_BASE, false); |
| 4410 | vmx_disable_intercept_for_msr(MSR_GS_BASE, false); |
| 4411 | vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true); |
| 4412 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false); |
| 4413 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false); |
| 4414 | vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false); |
| 4415 | |
| 4416 | if (enable_ept) { |
| 4417 | bypass_guest_pf = 0; |
| 4418 | kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull, |
| 4419 | VMX_EPT_EXECUTABLE_MASK); |
| 4420 | kvm_enable_tdp(); |
| 4421 | } else |
| 4422 | kvm_disable_tdp(); |
| 4423 | |
| 4424 | if (bypass_guest_pf) |
| 4425 | kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull); |
| 4426 | |
| 4427 | return 0; |
| 4428 | |
| 4429 | out3: |
| 4430 | free_page((unsigned long)vmx_msr_bitmap_longmode); |
| 4431 | out2: |
| 4432 | free_page((unsigned long)vmx_msr_bitmap_legacy); |
| 4433 | out1: |
| 4434 | free_page((unsigned long)vmx_io_bitmap_b); |
| 4435 | out: |
| 4436 | free_page((unsigned long)vmx_io_bitmap_a); |
| 4437 | return r; |
| 4438 | } |
| 4439 | |
| 4440 | static void __exit vmx_exit(void) |
| 4441 | { |
| 4442 | free_page((unsigned long)vmx_msr_bitmap_legacy); |
| 4443 | free_page((unsigned long)vmx_msr_bitmap_longmode); |
| 4444 | free_page((unsigned long)vmx_io_bitmap_b); |
| 4445 | free_page((unsigned long)vmx_io_bitmap_a); |
| 4446 | |
| 4447 | kvm_exit(); |
| 4448 | } |
| 4449 | |
| 4450 | module_init(vmx_init) |
| 4451 | module_exit(vmx_exit) |