 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Yaniv Kamay <yaniv@qumranet.com>
 * Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>

#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>

#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
    X86_FEATURE_MATCH(X86_FEATURE_SVM),
    {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT           (1 << 0)
#define SVM_FEATURE_LBRV          (1 << 1)
#define SVM_FEATURE_SVML          (1 << 2)
#define SVM_FEATURE_NRIP          (1 << 3)
#define SVM_FEATURE_TSC_RATE      (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN    (1 << 5)
#define SVM_FEATURE_FLUSH_ASID    (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER  (1 << 10)
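/*
 * These feature bits mirror the SVM feature enumeration in
 * CPUID Fn8000_000A_EDX; an illustrative sketch of how they can
 * be probed:
 *
 *	u32 svm_features = cpuid_edx(SVM_CPUID_FUNC);
 *	if (svm_features & SVM_FEATURE_NRIP)
 *		... hardware provides next_rip in the VMCB ...
 */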
#define NESTED_EXIT_HOST     0 /* Exit handled on host level */
#define NESTED_EXIT_DONE     1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
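/*
 * Illustrative flow (as implemented by the handlers further down): the
 * nested intercept checks return one of these codes. NESTED_EXIT_DONE
 * makes nested_svm_exit_handled() emulate a #vmexit into the L1 guest,
 * NESTED_EXIT_HOST lets KVM handle the exit itself, and
 * NESTED_EXIT_CONTINUE defers the decision to further checks.
 */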
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN  0x0000000000000001ULL
#define TSC_RATIO_MAX  0x000000ffffffffffULL
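/*
 * The TSC ratio MSR holds an 8.32 fixed-point multiplier (integer part
 * in bits 39:32, fraction in bits 31:0), hence the reserved mask above.
 * Worked example: running a 1500000 kHz guest on a 3000000 kHz host
 * gives
 *
 *	ratio = (1500000ULL << 32) / 3000000;	// 0x0080000000 == 0.5
 *
 * and TSC_RATIO_DEFAULT (defined below as 0x0100000000) encodes 1.0.
 */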
static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
    MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
    MSR_FS_BASE,
#endif
    MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

    /* These are the merged vectors */
    /* gpa pointers to the real vectors */
    /* A VMEXIT is required but not yet emulated */
    /* cache for intercepts of the guest */
    u32 intercept_exceptions;
    /* Nested Paging related state */

#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;
    struct kvm_vcpu vcpu;
    unsigned long vmcb_pa;
    struct svm_cpu_data *svm_data;
    uint64_t asid_generation;
    uint64_t sysenter_esp;
    uint64_t sysenter_eip;

    u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];

    struct nested_state nested;

    unsigned int3_injected;
    unsigned long int3_rip;

    /* cached guest cpuid flags for faster access */
    bool nrips_enabled : 1;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL

#define MSR_INVALID 0xffffffffU

static const struct svm_direct_access_msrs {
    u32 index;   /* Index of the MSR */
    bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
    { .index = MSR_STAR,                  .always = true  },
    { .index = MSR_IA32_SYSENTER_CS,      .always = true  },
#ifdef CONFIG_X86_64
    { .index = MSR_GS_BASE,               .always = true  },
    { .index = MSR_FS_BASE,               .always = true  },
    { .index = MSR_KERNEL_GS_BASE,        .always = true  },
    { .index = MSR_LSTAR,                 .always = true  },
    { .index = MSR_CSTAR,                 .always = true  },
    { .index = MSR_SYSCALL_MASK,          .always = true  },
#endif
    { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
    { .index = MSR_IA32_LASTBRANCHTOIP,   .always = false },
    { .index = MSR_IA32_LASTINTFROMIP,    .always = false },
    { .index = MSR_IA32_LASTINTTOIP,      .always = false },
    { .index = MSR_INVALID,               .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

enum {
    VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                        pause filter count */
    VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
    VMCB_ASID,       /* ASID */
    VMCB_INTR,       /* int_ctl, int_vector */
    VMCB_NPT,        /* npt_en, nCR3, gPAT */
    VMCB_CR,         /* CR0, CR3, CR4, EFER */
    VMCB_DR,         /* DR6, DR7 */
    VMCB_DT,         /* GDT, IDT */
    VMCB_SEG,        /* CS, DS, SS, ES, CPL */
    VMCB_CR2,        /* CR2 only */
    VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
    VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
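/*
 * Usage sketch of the clean-bits protocol: hardware may skip reloading
 * a VMCB field group on VMRUN when its clean bit is set, so every
 * software write to a field must be paired with mark_dirty() for the
 * corresponding group, e.g.:
 *
 *	svm->vmcb->save.dr7 = value;
 *	mark_dirty(svm->vmcb, VMCB_DR);
 *
 * mark_all_clean() is then called once the CPU has consumed the VMCB
 * (after VMRUN), while TPR and CR2 stay dirty as noted above.
 */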
static inline void mark_all_dirty(struct vmcb *vmcb)
{
    vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
    vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
        & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
    vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
    return container_of(vcpu, struct vcpu_svm, vcpu);
}

static void recalc_intercepts(struct vcpu_svm *svm)
{
    struct vmcb_control_area *c, *h;
    struct nested_state *g;

    mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

    if (!is_guest_mode(&svm->vcpu))
        return;

    c = &svm->vmcb->control;
    h = &svm->nested.hsave->control;
    g = &svm->nested;

    c->intercept_cr = h->intercept_cr | g->intercept_cr;
    c->intercept_dr = h->intercept_dr | g->intercept_dr;
    c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
    c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
    if (is_guest_mode(&svm->vcpu))
        return svm->nested.hsave;
    else
        return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_cr |= (1U << bit);

    recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_cr &= ~(1U << bit);

    recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
        | (1 << INTERCEPT_DR1_READ)
        | (1 << INTERCEPT_DR2_READ)
        | (1 << INTERCEPT_DR3_READ)
        | (1 << INTERCEPT_DR4_READ)
        | (1 << INTERCEPT_DR5_READ)
        | (1 << INTERCEPT_DR6_READ)
        | (1 << INTERCEPT_DR7_READ)
        | (1 << INTERCEPT_DR0_WRITE)
        | (1 << INTERCEPT_DR1_WRITE)
        | (1 << INTERCEPT_DR2_WRITE)
        | (1 << INTERCEPT_DR3_WRITE)
        | (1 << INTERCEPT_DR4_WRITE)
        | (1 << INTERCEPT_DR5_WRITE)
        | (1 << INTERCEPT_DR6_WRITE)
        | (1 << INTERCEPT_DR7_WRITE);

    recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_dr = 0;

    recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_exceptions |= (1U << bit);

    recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept_exceptions &= ~(1U << bit);

    recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept |= (1ULL << bit);

    recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
    struct vmcb *vmcb = get_host_vmcb(svm);

    vmcb->control.intercept &= ~(1ULL << bit);

    recalc_intercepts(svm);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
    svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
    svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
    return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
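/*
 * Note: these helpers track the *virtual* GIF for the nested guest in
 * hflags; the host's real GIF is toggled with the clgi()/stgi() wrappers
 * below and is not directly readable from software.
 */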
static unsigned long iopm_base;

struct kvm_ldttss_desc {
    u16 limit0;
    u16 base0;
    unsigned base1:8, type:5, dpl:2, p:1;
    unsigned limit1:4, zero0:3, g:1, base2:8;
    u32 base3;
    u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
    int cpu;

    u64 asid_generation;
    u32 max_asid;
    u32 next_asid;
    struct kvm_ldttss_desc *tss_desc;

    struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
    int cpu;
    int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
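/*
 * Geometry sketch: each of the three ranges above covers
 * MSRS_IN_RANGE = 2048 * 8 / 2 = 8192 MSRs, because every MSR takes two
 * permission bits (one read, one write) in its 2048-byte chunk of the
 * MSR permission map.
 */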
static u32 svm_msrpm_offset(u32 msr)
{
    u32 offset;
    int i;

    for (i = 0; i < NUM_MSR_MAPS; i++) {
        if (msr < msrpm_ranges[i] ||
            msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
            continue;

        offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
        offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

        /* Now we have the u8 offset - but need the u32 offset */
        return offset / 4;
    }

    /* MSR not in any range */
    return MSR_INVALID;
}
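/*
 * Worked example (MSR_STAR == 0xc0000081): range index 1 matches, the
 * byte offset is (0x81 / 4) + 2048 = 2080, and the returned u32 offset
 * is 2080 / 4 = 520. set_msr_interception() below then toggles bits
 * 2 * (0x81 & 0xf) = 2 (read) and 3 (write) inside msrpm[520].
 */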
#define MAX_INST_SIZE 15

static inline void clgi(void)
{
    asm volatile(__ex(SVM_CLGI));
}

static inline void stgi(void)
{
    asm volatile(__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
    asm volatile(__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
    return PT64_ROOT_LEVEL;
#else
    return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
    vcpu->arch.efer = efer;
    if (!npt_enabled && !(efer & EFER_LMA))
        efer &= ~EFER_LME;

    to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
    mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
    info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
    return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    u32 ret = 0;

    if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
        ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
    return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    if (mask == 0)
        svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
    else
        svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    if (svm->vmcb->control.next_rip != 0) {
        WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
        svm->next_rip = svm->vmcb->control.next_rip;
    }

    if (!svm->next_rip) {
        if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                EMULATE_DONE)
            printk(KERN_DEBUG "%s: NOP\n", __func__);
        return;
    }
    if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
        printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
               __func__, kvm_rip_read(vcpu), svm->next_rip);

    kvm_rip_write(vcpu, svm->next_rip);
    svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                                bool has_error_code, u32 error_code,
                                bool reinject)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    /*
     * If we are within a nested VM we'd better #VMEXIT and let the guest
     * handle the exception.
     */
    if (!reinject &&
        nested_svm_check_exception(svm, nr, has_error_code, error_code))
        return;

    if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
        unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

        /*
         * For guest debugging where we have to reinject #BP if some
         * INT3 is guest-owned:
         * Emulate nRIP by moving RIP forward. Will fail if injection
         * raises a fault that is not intercepted. Still better than
         * failing in all cases.
         */
        skip_emulated_instruction(&svm->vcpu);
        rip = kvm_rip_read(&svm->vcpu);
        svm->int3_rip = rip + svm->vmcb->save.cs.base;
        svm->int3_injected = rip - old_rip;
    }

    svm->vmcb->control.event_inj = nr
        | SVM_EVTINJ_VALID
        | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
        | SVM_EVTINJ_TYPE_EXEPT;
    svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
    u32 low, high;
    int err;
    u64 val;

    if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
        return;

    /* Use _safe variants to not break nested virtualization */
    val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
    if (err)
        return;

    val |= (1ULL << 47);

    low  = lower_32_bits(val);
    high = upper_32_bits(val);

    native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

    erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
    /*
     * Guests should see errata 400 and 415 as fixed (assuming that
     * HLT and IO instructions are intercepted).
     */
    vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
    vcpu->arch.osvw.status = osvw_status & ~(6ULL);

    /*
     * By increasing VCPU's osvw.length to 3 we are telling the guest that
     * all osvw.status bits inside that length, including bit 0 (which is
     * reserved for erratum 298), are valid. However, if host processor's
     * osvw_len is 0 then osvw_status[0] carries no information. We need to
     * be conservative here and therefore we tell the guest that erratum 298
     * is present (because we really don't know).
     */
    if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
        vcpu->arch.osvw.status |= 1;
}
static int has_svm(void)
{
    const char *msg;

    if (!cpu_has_svm(&msg)) {
        printk(KERN_INFO "has_svm: %s\n", msg);
        return 0;
    }

    return 1;
}

static void svm_hardware_disable(void)
{
    /* Make sure we clean up behind us */
    if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
        wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

    cpu_svm_disable();

    amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{
    struct svm_cpu_data *sd;
    uint64_t efer;
    struct desc_ptr gdt_descr;
    struct desc_struct *gdt;
    int me = raw_smp_processor_id();

    rdmsrl(MSR_EFER, efer);
    if (efer & EFER_SVME)
        return -EBUSY;

    if (!has_svm()) {
        pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
        return -EINVAL;
    }
    sd = per_cpu(svm_data, me);
    if (!sd) {
        pr_err("%s: svm_data is NULL on %d\n", __func__, me);
        return -EINVAL;
    }

    sd->asid_generation = 1;
    sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
    sd->next_asid = sd->max_asid + 1;

    native_store_gdt(&gdt_descr);
    gdt = (struct desc_struct *)gdt_descr.address;
    sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

    wrmsrl(MSR_EFER, efer | EFER_SVME);

    wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

    if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
        wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
        __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
    }

    /*
     * Get OSVW bits.
     *
     * Note that it is possible to have a system with mixed processor
     * revisions and therefore different OSVW bits. If bits are not the same
     * on different processors then choose the worst case (i.e. if erratum
     * is present on one processor and not on another then assume that the
     * erratum is present everywhere).
     */
    if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
        uint64_t len, status = 0;
        int err;

        len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
        if (!err)
            status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                          &err);

        if (err)
            osvw_status = osvw_len = 0;
        else {
            if (len < osvw_len)
                osvw_len = len;
            osvw_status |= status;
            osvw_status &= (1ULL << osvw_len) - 1;
        }
    } else
        osvw_status = osvw_len = 0;

    svm_init_erratum_383();

    amd_pmu_enable_virt();

    return 0;
}

static void svm_cpu_uninit(int cpu)
{
    struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

    if (!sd)
        return;

    per_cpu(svm_data, raw_smp_processor_id()) = NULL;
    __free_page(sd->save_area);
    kfree(sd);
}

static int svm_cpu_init(int cpu)
{
    struct svm_cpu_data *sd;
    int r;

    sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
    if (!sd)
        return -ENOMEM;
    sd->cpu = cpu;
    sd->save_area = alloc_page(GFP_KERNEL);
    r = -ENOMEM;
    if (!sd->save_area)
        goto err_1;

    per_cpu(svm_data, cpu) = sd;

    return 0;

err_1:
    kfree(sd);
    return r;
}

static bool valid_msr_intercept(u32 index)
{
    int i;

    for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
        if (direct_access_msrs[i].index == index)
            return true;

    return false;
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
    u8 bit_read, bit_write;
    unsigned long tmp;
    u32 offset;
    /*
     * If this warning triggers, extend the direct_access_msrs list at
     * the beginning of the file.
     */
    WARN_ON(!valid_msr_intercept(msr));

    offset    = svm_msrpm_offset(msr);
    bit_read  = 2 * (msr & 0x0f);
    bit_write = 2 * (msr & 0x0f) + 1;
    tmp       = msrpm[offset];

    BUG_ON(offset == MSR_INVALID);

    read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
    write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

    msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
    int i;

    memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

    for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
        if (!direct_access_msrs[i].always)
            continue;

        set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
    }
}

static void add_msr_offset(u32 offset)
{
    int i;

    for (i = 0; i < MSRPM_OFFSETS; ++i) {

        /* Offset already in list? */
        if (msrpm_offsets[i] == offset)
            return;

        /* Slot used by another offset? */
        if (msrpm_offsets[i] != MSR_INVALID)
            continue;

        /* Add offset to list */
        msrpm_offsets[i] = offset;

        return;
    }
    /*
     * If this BUG triggers, the msrpm_offsets table has overflowed. Just
     * increase MSRPM_OFFSETS in this case.
     */
    BUG();
}
static void init_msrpm_offsets(void)
{
    int i;

    memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

    for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
        u32 offset;

        offset = svm_msrpm_offset(direct_access_msrs[i].index);
        BUG_ON(offset == MSR_INVALID);

        add_msr_offset(offset);
    }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
    u32 *msrpm = svm->msrpm;

    svm->vmcb->control.lbr_ctl = 1;
    set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
    set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
    set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
    set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
    u32 *msrpm = svm->msrpm;

    svm->vmcb->control.lbr_ctl = 0;
    set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
    set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
    set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
    set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
    int cpu;
    struct page *iopm_pages;
    void *iopm_va;
    int r;

    iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

    if (!iopm_pages)
        return -ENOMEM;

    iopm_va = page_address(iopm_pages);
    memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
    iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

    init_msrpm_offsets();

    if (boot_cpu_has(X86_FEATURE_NX))
        kvm_enable_efer_bits(EFER_NX);

    if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
        kvm_enable_efer_bits(EFER_FFXSR);

    if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
        kvm_has_tsc_control = true;
        kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
        kvm_tsc_scaling_ratio_frac_bits = 32;
    }

    if (nested) {
        printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
        kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
    }

    for_each_possible_cpu(cpu) {
        r = svm_cpu_init(cpu);
        if (r)
            goto err;
    }

    if (!boot_cpu_has(X86_FEATURE_NPT))
        npt_enabled = false;

    if (npt_enabled && !npt) {
        printk(KERN_INFO "kvm: Nested Paging disabled\n");
        npt_enabled = false;
    }

    if (npt_enabled) {
        printk(KERN_INFO "kvm: Nested Paging enabled\n");
        kvm_enable_tdp();
    } else
        kvm_disable_tdp();

    return 0;

err:
    __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
    iopm_base = 0;
    return r;
}

static __exit void svm_hardware_unsetup(void)
{
    int cpu;

    for_each_possible_cpu(cpu)
        svm_cpu_uninit(cpu);

    __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
    iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
    seg->selector = 0;
    seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                  SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
    seg->limit = 0xffff;
    seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
    seg->selector = 0;
    seg->attrib = SVM_SELECTOR_P_MASK | type;
    seg->limit = 0xffff;
    seg->base = 0;
}

static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
    u64 ratio;
    u64 khz;

    /* Guest TSC same frequency as host TSC? */
    if (!scale) {
        vcpu->arch.tsc_scaling_ratio = TSC_RATIO_DEFAULT;
        return;
    }

    /* TSC scaling supported? */
    if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
        if (user_tsc_khz > tsc_khz) {
            vcpu->arch.tsc_catchup = 1;
            vcpu->arch.tsc_always_catchup = 1;
        } else
            WARN(1, "user requested TSC rate below hardware speed\n");
        return;
    }

    khz = user_tsc_khz;

    /* TSC scaling required - calculate ratio */
    ratio = khz << 32;
    do_div(ratio, tsc_khz);

    if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
        WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
                  user_tsc_khz);
        return;
    }
    vcpu->arch.tsc_scaling_ratio = ratio;
}

static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    return svm->vmcb->control.tsc_offset;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    u64 g_tsc_offset = 0;

    if (is_guest_mode(vcpu)) {
        g_tsc_offset = svm->vmcb->control.tsc_offset -
                       svm->nested.hsave->control.tsc_offset;
        svm->nested.hsave->control.tsc_offset = offset;
    } else
        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset,
                                   offset);

    svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

    mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    if (host) {
        if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
            WARN_ON(adjustment < 0);
        adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
    }

    svm->vmcb->control.tsc_offset += adjustment;
    if (is_guest_mode(vcpu))
        svm->nested.hsave->control.tsc_offset += adjustment;
    else
        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset - adjustment,
                                   svm->vmcb->control.tsc_offset);

    mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
    u64 tsc;

    tsc = kvm_scale_tsc(vcpu, rdtsc());

    return target_tsc - tsc;
}

static void init_vmcb(struct vcpu_svm *svm)
{
    struct vmcb_control_area *control = &svm->vmcb->control;
    struct vmcb_save_area *save = &svm->vmcb->save;

    svm->vcpu.fpu_active = 1;
    svm->vcpu.arch.hflags = 0;

    set_cr_intercept(svm, INTERCEPT_CR0_READ);
    set_cr_intercept(svm, INTERCEPT_CR3_READ);
    set_cr_intercept(svm, INTERCEPT_CR4_READ);
    set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
    set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
    set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
    set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

    set_dr_intercepts(svm);

    set_exception_intercept(svm, PF_VECTOR);
    set_exception_intercept(svm, UD_VECTOR);
    set_exception_intercept(svm, MC_VECTOR);

    set_intercept(svm, INTERCEPT_INTR);
    set_intercept(svm, INTERCEPT_NMI);
    set_intercept(svm, INTERCEPT_SMI);
    set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
    set_intercept(svm, INTERCEPT_RDPMC);
    set_intercept(svm, INTERCEPT_CPUID);
    set_intercept(svm, INTERCEPT_INVD);
    set_intercept(svm, INTERCEPT_HLT);
    set_intercept(svm, INTERCEPT_INVLPG);
    set_intercept(svm, INTERCEPT_INVLPGA);
    set_intercept(svm, INTERCEPT_IOIO_PROT);
    set_intercept(svm, INTERCEPT_MSR_PROT);
    set_intercept(svm, INTERCEPT_TASK_SWITCH);
    set_intercept(svm, INTERCEPT_SHUTDOWN);
    set_intercept(svm, INTERCEPT_VMRUN);
    set_intercept(svm, INTERCEPT_VMMCALL);
    set_intercept(svm, INTERCEPT_VMLOAD);
    set_intercept(svm, INTERCEPT_VMSAVE);
    set_intercept(svm, INTERCEPT_STGI);
    set_intercept(svm, INTERCEPT_CLGI);
    set_intercept(svm, INTERCEPT_SKINIT);
    set_intercept(svm, INTERCEPT_WBINVD);
    set_intercept(svm, INTERCEPT_MONITOR);
    set_intercept(svm, INTERCEPT_MWAIT);
    set_intercept(svm, INTERCEPT_XSETBV);

    control->iopm_base_pa = iopm_base;
    control->msrpm_base_pa = __pa(svm->msrpm);
    control->int_ctl = V_INTR_MASKING_MASK;

    init_seg(&save->es);
    init_seg(&save->ss);
    init_seg(&save->ds);
    init_seg(&save->fs);
    init_seg(&save->gs);

    save->cs.selector = 0xf000;
    save->cs.base = 0xffff0000;
    /* Executable/Readable Code Segment */
    save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
                      SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
    save->cs.limit = 0xffff;

    save->gdtr.limit = 0xffff;
    save->idtr.limit = 0xffff;

    init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
    init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

    svm_set_efer(&svm->vcpu, 0);
    save->dr6 = 0xffff0ff0;
    kvm_set_rflags(&svm->vcpu, 2);
    save->rip = 0x0000fff0;
    svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

    /*
     * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
     * It also updates the guest-visible cr0 value.
     */
    svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
    kvm_mmu_reset_context(&svm->vcpu);

    save->cr4 = X86_CR4_PAE;

    if (npt_enabled) {
        /* Setup VMCB for Nested Paging */
        control->nested_ctl = 1;
        clr_intercept(svm, INTERCEPT_INVLPG);
        clr_exception_intercept(svm, PF_VECTOR);
        clr_cr_intercept(svm, INTERCEPT_CR3_READ);
        clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
        save->g_pat = svm->vcpu.arch.pat;
        save->cr3 = 0;
        save->cr4 = 0;
    }
    svm->asid_generation = 0;

    svm->nested.vmcb = 0;
    svm->vcpu.arch.hflags = 0;

    if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
        control->pause_filter_count = 3000;
        set_intercept(svm, INTERCEPT_PAUSE);
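        /*
         * With a filter count of 3000 the CPU only intercepts PAUSE
         * after (roughly) 3000 back-to-back iterations, so short
         * spin-lock waits stay in the guest and only long busy-wait
         * loops trigger a #VMEXIT.
         */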
    }

    mark_all_dirty(svm->vmcb);

    enable_gif(svm);
}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    u32 dummy;
    u32 eax = 1;

    if (!init_event) {
        svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
                                   MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
            svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
    }
    init_vmcb(svm);

    kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
    kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
    struct vcpu_svm *svm;
    struct page *page;
    struct page *msrpm_pages;
    struct page *hsave_page;
    struct page *nested_msrpm_pages;
    int err;

    svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
    if (!svm) {
        err = -ENOMEM;
        goto out;
    }

    err = kvm_vcpu_init(&svm->vcpu, kvm, id);
    if (err)
        goto free_svm;

    err = -ENOMEM;
    page = alloc_page(GFP_KERNEL);
    if (!page)
        goto uninit;

    msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
    if (!msrpm_pages)
        goto free_page1;

    nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
    if (!nested_msrpm_pages)
        goto free_page2;

    hsave_page = alloc_page(GFP_KERNEL);
    if (!hsave_page)
        goto free_page3;

    svm->nested.hsave = page_address(hsave_page);

    svm->msrpm = page_address(msrpm_pages);
    svm_vcpu_init_msrpm(svm->msrpm);

    svm->nested.msrpm = page_address(nested_msrpm_pages);
    svm_vcpu_init_msrpm(svm->nested.msrpm);

    svm->vmcb = page_address(page);
    clear_page(svm->vmcb);
    svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
    svm->asid_generation = 0;
    init_vmcb(svm);

    svm_init_osvw(&svm->vcpu);

    return &svm->vcpu;

free_page3:
    __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
    __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
    __free_page(page);
uninit:
    kvm_vcpu_uninit(&svm->vcpu);
free_svm:
    kmem_cache_free(kvm_vcpu_cache, svm);
out:
    return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
    __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
    __free_page(virt_to_page(svm->nested.hsave));
    __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
    kvm_vcpu_uninit(vcpu);
    kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    int i;

    if (unlikely(cpu != vcpu->cpu)) {
        svm->asid_generation = 0;
        mark_all_dirty(svm->vmcb);
    }

#ifdef CONFIG_X86_64
    rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
    savesegment(fs, svm->host.fs);
    savesegment(gs, svm->host.gs);
    svm->host.ldt = kvm_read_ldt();

    for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
        rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

    if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
        u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
        if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
            __this_cpu_write(current_tsc_ratio, tsc_ratio);
            wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
        }
    }
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    int i;

    ++vcpu->stat.host_state_reload;
    kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
    loadsegment(fs, svm->host.fs);
    wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
    load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
    loadsegment(gs, svm->host.gs);
#endif
#endif
    for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
        wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
    return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
    /*
     * Any change of EFLAGS.VM is accompanied by a reload of SS
     * (caused by either a task switch or an inter-privilege IRET),
     * so we do not need to update the CPL here.
     */
    to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
    switch (reg) {
    case VCPU_EXREG_PDPTR:
        BUG_ON(!npt_enabled);
        load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
        break;
    default:
        BUG();
    }
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
    set_intercept(svm, INTERCEPT_VINTR);
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
    clr_intercept(svm, INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
    struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

    switch (seg) {
    case VCPU_SREG_CS: return &save->cs;
    case VCPU_SREG_DS: return &save->ds;
    case VCPU_SREG_ES: return &save->es;
    case VCPU_SREG_FS: return &save->fs;
    case VCPU_SREG_GS: return &save->gs;
    case VCPU_SREG_SS: return &save->ss;
    case VCPU_SREG_TR: return &save->tr;
    case VCPU_SREG_LDTR: return &save->ldtr;
    }
    BUG();
    return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
    struct vmcb_seg *s = svm_seg(vcpu, seg);

    return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
    struct vmcb_seg *s = svm_seg(vcpu, seg);

    var->base = s->base;
    var->limit = s->limit;
    var->selector = s->selector;
    var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
    var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
    var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
    var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
    var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
    var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
    var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;

    /*
     * AMD CPUs circa 2014 track the G bit for all segments except CS.
     * However, the SVM spec states that the G bit is not observed by the
     * CPU, and some VMware virtual CPUs drop the G bit for all segments.
     * So let's synthesize a legal G bit for all segments, this helps
     * running KVM nested. It also helps cross-vendor migration, because
     * Intel's vmentry has a check on the 'G' bit.
     */
    var->g = s->limit > 0xfffff;

    /*
     * AMD's VMCB does not have an explicit unusable field, so emulate it
     * for cross vendor migration purposes by "not present".
     */
    var->unusable = !var->present || (var->type == 0);

    switch (seg) {
    case VCPU_SREG_TR:
        /*
         * Work around a bug where the busy flag in the tr selector
         * isn't exposed.
         */
        var->type |= 0x2;
        break;
    case VCPU_SREG_DS:
    case VCPU_SREG_ES:
    case VCPU_SREG_FS:
    case VCPU_SREG_GS:
        /*
         * The accessed bit must always be set in the segment
         * descriptor cache: although it can be cleared in the
         * descriptor itself, the cached bit always remains set.
         * Since Intel has a check on this, set it here to support
         * cross-vendor migration.
         */
        if (!var->unusable)
            var->type |= 0x1;
        break;
    case VCPU_SREG_SS:
        /*
         * On AMD CPUs sometimes the DB bit in the segment
         * descriptor is left as 1, although the whole segment has
         * been made unusable. Clear it here to pass an Intel VMX
         * entry check when cross vendor migrating.
         */
        if (var->unusable)
            var->db = 0;
        var->dpl = to_svm(vcpu)->vmcb->save.cpl;
        break;
    }
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
    struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

    return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    dt->size = svm->vmcb->save.idtr.limit;
    dt->address = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    svm->vmcb->save.idtr.limit = dt->size;
    svm->vmcb->save.idtr.base = dt->address;
    mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    dt->size = svm->vmcb->save.gdtr.limit;
    dt->address = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    svm->vmcb->save.gdtr.limit = dt->size;
    svm->vmcb->save.gdtr.base = dt->address;
    mark_dirty(svm->vmcb, VMCB_DT);
}

static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void update_cr0_intercept(struct vcpu_svm *svm)
{
    ulong gcr0 = svm->vcpu.arch.cr0;
    u64 *hcr0 = &svm->vmcb->save.cr0;

    if (!svm->vcpu.fpu_active)
        *hcr0 |= SVM_CR0_SELECTIVE_MASK;
    else
        *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
            | (gcr0 & SVM_CR0_SELECTIVE_MASK);

    mark_dirty(svm->vmcb, VMCB_CR);

    if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
        clr_cr_intercept(svm, INTERCEPT_CR0_READ);
        clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
    } else {
        set_cr_intercept(svm, INTERCEPT_CR0_READ);
        set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
    }
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
    struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
    if (vcpu->arch.efer & EFER_LME) {
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
            vcpu->arch.efer |= EFER_LMA;
            svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
        }

        if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
            vcpu->arch.efer &= ~EFER_LMA;
            svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
        }
    }
#endif
    vcpu->arch.cr0 = cr0;

    if (!npt_enabled)
        cr0 |= X86_CR0_PG | X86_CR0_WP;

    if (!vcpu->fpu_active)
        cr0 |= X86_CR0_TS;
    /*
     * re-enable caching here because the QEMU bios
     * does not do it - this results in some delay at
     * reboot.
     */
    if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
    svm->vmcb->save.cr0 = cr0;
    mark_dirty(svm->vmcb, VMCB_CR);
    update_cr0_intercept(svm);
}

static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
    unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
    unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

    if (cr4 & X86_CR4_VMXE)
        return 1;

    if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
        svm_flush_tlb(vcpu);

    vcpu->arch.cr4 = cr4;
    if (!npt_enabled)
        cr4 |= X86_CR4_PAE;
    cr4 |= host_cr4_mce;
    to_svm(vcpu)->vmcb->save.cr4 = cr4;
    mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
    return 0;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    struct vmcb_seg *s = svm_seg(vcpu, seg);

    s->base = var->base;
    s->limit = var->limit;
    s->selector = var->selector;
    if (var->unusable)
        s->attrib = 0;
    else {
        s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
        s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
        s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
        s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
        s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
        s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
        s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
        s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
    }

    /*
     * This is always accurate, except if SYSRET returned to a segment
     * with SS.DPL != 3. Intel does not have this quirk, and always
     * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
     * would entail passing the CPL to userspace and back.
     */
    if (seg == VCPU_SREG_SS)
        svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;

    mark_dirty(svm->vmcb, VMCB_SEG);
}

static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    clr_exception_intercept(svm, DB_VECTOR);
    clr_exception_intercept(svm, BP_VECTOR);

    if (svm->nmi_singlestep)
        set_exception_intercept(svm, DB_VECTOR);

    if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
        if (vcpu->guest_debug &
            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
            set_exception_intercept(svm, DB_VECTOR);
        if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
            set_exception_intercept(svm, BP_VECTOR);
    } else
        vcpu->guest_debug = 0;
}

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
    if (sd->next_asid > sd->max_asid) {
        ++sd->asid_generation;
        sd->next_asid = 1;
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
    }

    svm->asid_generation = sd->asid_generation;
    svm->vmcb->control.asid = sd->next_asid++;

    mark_dirty(svm->vmcb, VMCB_ASID);
}
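/*
 * ASID handling sketch: ASIDs are handed out monotonically per core and
 * never reused within a generation. When the pool is exhausted the
 * generation counter is bumped and a full TLB flush is requested, which
 * is why svm_vcpu_load() only has to zero asid_generation to force a
 * fresh ASID after a CPU migration.
 */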
static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
    return to_svm(vcpu)->vmcb->save.dr6;
}

static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    svm->vmcb->save.dr6 = value;
    mark_dirty(svm->vmcb, VMCB_DR);
}

static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    get_debugreg(vcpu->arch.db[0], 0);
    get_debugreg(vcpu->arch.db[1], 1);
    get_debugreg(vcpu->arch.db[2], 2);
    get_debugreg(vcpu->arch.db[3], 3);
    vcpu->arch.dr6 = svm_get_dr6(vcpu);
    vcpu->arch.dr7 = svm->vmcb->save.dr7;

    vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
    set_dr_intercepts(svm);
}

static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    svm->vmcb->save.dr7 = value;
    mark_dirty(svm->vmcb, VMCB_DR);
}

static int pf_interception(struct vcpu_svm *svm)
{
    u64 fault_address = svm->vmcb->control.exit_info_2;
    u32 error_code;
    int r = 1;

    switch (svm->apf_reason) {
    default:
        error_code = svm->vmcb->control.exit_info_1;

        trace_kvm_page_fault(fault_address, error_code);
        if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
            kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
        r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
                               svm->vmcb->control.insn_bytes,
                               svm->vmcb->control.insn_len);
        break;
    case KVM_PV_REASON_PAGE_NOT_PRESENT:
        svm->apf_reason = 0;
        local_irq_disable();
        kvm_async_pf_task_wait(fault_address);
        local_irq_enable();
        break;
    case KVM_PV_REASON_PAGE_READY:
        svm->apf_reason = 0;
        local_irq_disable();
        kvm_async_pf_task_wake(fault_address);
        local_irq_enable();
        break;
    }
    return r;
}

static int db_interception(struct vcpu_svm *svm)
{
    struct kvm_run *kvm_run = svm->vcpu.run;

    if (!(svm->vcpu.guest_debug &
          (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
        !svm->nmi_singlestep) {
        kvm_queue_exception(&svm->vcpu, DB_VECTOR);
        return 1;
    }

    if (svm->nmi_singlestep) {
        svm->nmi_singlestep = false;
        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
            svm->vmcb->save.rflags &=
                ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
        update_db_bp_intercept(&svm->vcpu);
    }

    if (svm->vcpu.guest_debug &
        (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
        kvm_run->exit_reason = KVM_EXIT_DEBUG;
        kvm_run->debug.arch.pc =
            svm->vmcb->save.cs.base + svm->vmcb->save.rip;
        kvm_run->debug.arch.exception = DB_VECTOR;
        return 0;
    }

    return 1;
}

static int bp_interception(struct vcpu_svm *svm)
{
    struct kvm_run *kvm_run = svm->vcpu.run;

    kvm_run->exit_reason = KVM_EXIT_DEBUG;
    kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
    kvm_run->debug.arch.exception = BP_VECTOR;
    return 0;
}

static int ud_interception(struct vcpu_svm *svm)
{
    int er;

    er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
    if (er != EMULATE_DONE)
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
    return 1;
}

static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    clr_exception_intercept(svm, NM_VECTOR);

    svm->vcpu.fpu_active = 1;
    update_cr0_intercept(svm);
}

static int nm_interception(struct vcpu_svm *svm)
{
    svm_fpu_activate(&svm->vcpu);
    return 1;
}

static bool is_erratum_383(void)
{
    int err, i;
    u64 value;

    if (!erratum_383_found)
        return false;

    value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
    if (err)
        return false;

    /* Bit 62 may or may not be set for this mce */
    value &= ~(1ULL << 62);

    if (value != 0xb600000000010015ULL)
        return false;

    /* Clear MCi_STATUS registers */
    for (i = 0; i < 6; ++i)
        native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);

    value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
    if (!err) {
        u32 low, high;

        value &= ~(1ULL << 2);
        low  = lower_32_bits(value);
        high = upper_32_bits(value);

        native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
    }

    /* Flush tlb to evict multi-match entries */
    __flush_tlb_all();

    return true;
}

static void svm_handle_mce(struct vcpu_svm *svm)
{
    if (is_erratum_383()) {
        /*
         * Erratum 383 triggered. Guest state is corrupt so kill the
         * guest.
         */
        pr_err("KVM: Guest triggered AMD Erratum 383\n");

        kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);

        return;
    }

    /*
     * On an #MC intercept the MCE handler is not called automatically in
     * the host. So do it by hand here.
     */
    asm volatile (
        "int $0x12\n");
    /* not sure if we ever come back to this point */

    return;
}

static int mc_interception(struct vcpu_svm *svm)
{
    return 1;
}

static int shutdown_interception(struct vcpu_svm *svm)
{
    struct kvm_run *kvm_run = svm->vcpu.run;

    /*
     * VMCB is undefined after a SHUTDOWN intercept
     * so reinitialize it.
     */
    clear_page(svm->vmcb);
    init_vmcb(svm);

    kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
    return 0;
}

static int io_interception(struct vcpu_svm *svm)
{
    struct kvm_vcpu *vcpu = &svm->vcpu;
    u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
    int size, in, string;
    unsigned port;

    ++svm->vcpu.stat.io_exits;
    string = (io_info & SVM_IOIO_STR_MASK) != 0;
    in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
    if (string || in)
        return emulate_instruction(vcpu, 0) == EMULATE_DONE;

    port = io_info >> 16;
    size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
    svm->next_rip = svm->vmcb->control.exit_info_2;
    skip_emulated_instruction(&svm->vcpu);

    return kvm_fast_pio_out(vcpu, size, port);
}

static int nmi_interception(struct vcpu_svm *svm)
{
    return 1;
}

static int intr_interception(struct vcpu_svm *svm)
{
    ++svm->vcpu.stat.irq_exits;
    return 1;
}

static int nop_on_interception(struct vcpu_svm *svm)
{
    return 1;
}

static int halt_interception(struct vcpu_svm *svm)
{
    svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
    return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm)
{
    svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
    kvm_emulate_hypercall(&svm->vcpu);
    return 1;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    return svm->nested.nested_cr3;
}

static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    u64 cr3 = svm->nested.nested_cr3;
    u64 pdpte;
    int ret;

    ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
                                   offset_in_page(cr3) + index * 8, 8);
    if (ret)
        return 0;
    return pdpte;
}

static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
                                   unsigned long root)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    svm->vmcb->control.nested_cr3 = root;
    mark_dirty(svm->vmcb, VMCB_NPT);
    svm_flush_tlb(vcpu);
}

static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
    struct vcpu_svm *svm = to_svm(vcpu);

    if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
        /*
         * TODO: track the cause of the nested page fault, and
         * correctly fill in the high bits of exit_info_1.
         */
        svm->vmcb->control.exit_code = SVM_EXIT_NPF;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = (1ULL << 32);
        svm->vmcb->control.exit_info_2 = fault->address;
    }

    svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
    svm->vmcb->control.exit_info_1 |= fault->error_code;
    /*
     * The present bit is always zero for page structure faults on real
     * hardware.
     */
    if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
        svm->vmcb->control.exit_info_1 &= ~1;

    nested_svm_vmexit(svm);
}

static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
    WARN_ON(mmu_is_nested(vcpu));
    kvm_init_shadow_mmu(vcpu);
    vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
    vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
    vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
    vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
    vcpu->arch.mmu.shadow_root_level = get_npt_level();
    reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
    vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
    vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
    if (!(svm->vcpu.arch.efer & EFER_SVME)
        || !is_paging(&svm->vcpu)) {
        kvm_queue_exception(&svm->vcpu, UD_VECTOR);
        return 1;
    }

    if (svm->vmcb->save.cpl) {
        kvm_inject_gp(&svm->vcpu, 0);
        return 1;
    }

    return 0;
}

static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code)
{
    int vmexit;

    if (!is_guest_mode(&svm->vcpu))
        return 0;

    svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
    svm->vmcb->control.exit_code_hi = 0;
    svm->vmcb->control.exit_info_1 = error_code;
    svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;

    vmexit = nested_svm_intercept(svm);
    if (vmexit == NESTED_EXIT_DONE)
        svm->nested.exit_required = true;

    return vmexit;
}
/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
    if (!is_guest_mode(&svm->vcpu))
        return true;

    if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
        return true;

    if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
        return false;
    /*
     * If a vmexit was already requested (by an intercepted exception,
     * for instance) do not overwrite it with an "external interrupt"
     * vmexit.
     */
    if (svm->nested.exit_required)
        return false;

    svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
    svm->vmcb->control.exit_info_1 = 0;
    svm->vmcb->control.exit_info_2 = 0;

    if (svm->nested.intercept & 1ULL) {
        /*
         * The #vmexit can't be emulated here directly because this
         * code path runs with irqs and preemption disabled. A
         * #vmexit emulation might sleep. Only signal request for
         * the #vmexit here.
         */
        svm->nested.exit_required = true;
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
        return false;
    }

    return true;
}
/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
    if (!is_guest_mode(&svm->vcpu))
        return true;

    if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
        return true;

    svm->vmcb->control.exit_code = SVM_EXIT_NMI;
    svm->nested.exit_required = true;

    return false;
}

static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
    struct page *page;

    might_sleep();

    page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
    if (is_error_page(page))
        goto error;

    *_page = page;

    return kmap(page);

error:
    kvm_inject_gp(&svm->vcpu, 0);

    return NULL;
}

static void nested_svm_unmap(struct page *page)
{
    kunmap(page);
    kvm_release_page_dirty(page);
}

static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
    unsigned port, size, iopm_len;
    u16 val, mask;
    u8 start_bit;
    u64 gpa;

    if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
        return NESTED_EXIT_HOST;

    port = svm->vmcb->control.exit_info_1 >> 16;
    size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
           SVM_IOIO_SIZE_SHIFT;
    gpa  = svm->nested.vmcb_iopm + (port / 8);
    start_bit = port % 8;
    iopm_len = (start_bit + size > 8) ? 2 : 1;
    mask = (0xf >> (4 - size)) << start_bit;
    val = 0;

    if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
        return NESTED_EXIT_DONE;

    return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
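/*
 * Worked example (assuming OUT 0x3f8, one byte): port 0x3f8 maps to
 * byte 0x3f8 / 8 = 127 of the nested IOPM, start_bit = 0, iopm_len = 1
 * and mask = 0x1; a set bit there means L1 intercepts the access and we
 * report NESTED_EXIT_DONE.
 */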
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
    u32 offset, msr, value;
    int write, mask;

    if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
        return NESTED_EXIT_HOST;

    msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
    offset = svm_msrpm_offset(msr);
    write  = svm->vmcb->control.exit_info_1 & 1;
    mask   = 1 << ((2 * (msr & 0xf)) + write);

    if (offset == MSR_INVALID)
        return NESTED_EXIT_DONE;
    /* The offset is in 32-bit units, but we need it in byte units */
    offset *= 4;
    if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
        return NESTED_EXIT_DONE;

    return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
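/*
 * Worked example (assuming a guest write to MSR_STAR): offset 520 from
 * svm_msrpm_offset() is scaled back to byte units, and with write == 1
 * the mask becomes 1 << (2 * 1 + 1) = bit 3 of the u32 read from the
 * nested MSR permission map.
 */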
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
    u32 exit_code = svm->vmcb->control.exit_code;

    switch (exit_code) {
    case SVM_EXIT_INTR:
    case SVM_EXIT_NMI:
    case SVM_EXIT_EXCP_BASE + MC_VECTOR:
        return NESTED_EXIT_HOST;
    case SVM_EXIT_NPF:
        /* For now we are always handling NPFs when using them */
        if (npt_enabled)
            return NESTED_EXIT_HOST;
        break;
    case SVM_EXIT_EXCP_BASE + PF_VECTOR:
        /* When we're shadowing, trap PFs, but not async PF */
        if (!npt_enabled && svm->apf_reason == 0)
            return NESTED_EXIT_HOST;
        break;
    case SVM_EXIT_EXCP_BASE + NM_VECTOR:
        nm_interception(svm);
        break;
    default:
        break;
    }

    return NESTED_EXIT_CONTINUE;
}

/*
 * If this function returns true, this #vmexit was already handled
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
    u32 exit_code = svm->vmcb->control.exit_code;
    int vmexit = NESTED_EXIT_HOST;

    switch (exit_code) {
    case SVM_EXIT_MSR:
        vmexit = nested_svm_exit_handled_msr(svm);
        break;
    case SVM_EXIT_IOIO:
        vmexit = nested_svm_intercept_ioio(svm);
        break;
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
        u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
        if (svm->nested.intercept_cr & bit)
            vmexit = NESTED_EXIT_DONE;
        break;
    }
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
        u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
        if (svm->nested.intercept_dr & bit)
            vmexit = NESTED_EXIT_DONE;
        break;
    }
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
        u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
        if (svm->nested.intercept_exceptions & excp_bits)
            vmexit = NESTED_EXIT_DONE;
        /* an async page fault always causes a vmexit */
        else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
                 svm->apf_reason != 0)
            vmexit = NESTED_EXIT_DONE;
        break;
    }
    case SVM_EXIT_ERR: {
        vmexit = NESTED_EXIT_DONE;
        break;
    }
    default: {
        u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
        if (svm->nested.intercept & exit_bits)
            vmexit = NESTED_EXIT_DONE;
    }
    }

    return vmexit;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
    int vmexit;

    vmexit = nested_svm_intercept(svm);

    if (vmexit == NESTED_EXIT_DONE)
        nested_svm_vmexit(svm);

    return vmexit;
}

static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
    struct vmcb_control_area *dst  = &dst_vmcb->control;
    struct vmcb_control_area *from = &from_vmcb->control;

    dst->intercept_cr         = from->intercept_cr;
    dst->intercept_dr         = from->intercept_dr;
    dst->intercept_exceptions = from->intercept_exceptions;
    dst->intercept            = from->intercept;
    dst->iopm_base_pa         = from->iopm_base_pa;
    dst->msrpm_base_pa        = from->msrpm_base_pa;
    dst->tsc_offset           = from->tsc_offset;
    dst->asid                 = from->asid;
    dst->tlb_ctl              = from->tlb_ctl;
    dst->int_ctl              = from->int_ctl;
    dst->int_vector           = from->int_vector;
    dst->int_state            = from->int_state;
    dst->exit_code            = from->exit_code;
    dst->exit_code_hi         = from->exit_code_hi;
    dst->exit_info_1          = from->exit_info_1;
    dst->exit_info_2          = from->exit_info_2;
    dst->exit_int_info        = from->exit_int_info;
    dst->exit_int_info_err    = from->exit_int_info_err;
    dst->nested_ctl           = from->nested_ctl;
    dst->event_inj            = from->event_inj;
    dst->event_inj_err        = from->event_inj_err;
    dst->nested_cr3           = from->nested_cr3;
    dst->lbr_ctl              = from->lbr_ctl;
}

static int nested_svm_vmexit(struct vcpu_svm *svm)
{
    struct vmcb *nested_vmcb;
    struct vmcb *hsave = svm->nested.hsave;
    struct vmcb *vmcb = svm->vmcb;
    struct page *page;

    trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
                                   vmcb->control.exit_info_1,
                                   vmcb->control.exit_info_2,
                                   vmcb->control.exit_int_info,
                                   vmcb->control.exit_int_info_err,
                                   KVM_ISA_SVM);

    nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
    if (!nested_vmcb)
        return 1;

    /* Exit Guest-Mode */
    leave_guest_mode(&svm->vcpu);
    svm->nested.vmcb = 0;

    /* Give the current vmcb to the guest */
    disable_gif(svm);

    nested_vmcb->save.es     = vmcb->save.es;
    nested_vmcb->save.cs     = vmcb->save.cs;
    nested_vmcb->save.ss     = vmcb->save.ss;
    nested_vmcb->save.ds     = vmcb->save.ds;
    nested_vmcb->save.gdtr   = vmcb->save.gdtr;
    nested_vmcb->save.idtr   = vmcb->save.idtr;
    nested_vmcb->save.efer   = svm->vcpu.arch.efer;
    nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
    nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
    nested_vmcb->save.cr2    = vmcb->save.cr2;
    nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
    nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
    nested_vmcb->save.rip    = vmcb->save.rip;
    nested_vmcb->save.rsp    = vmcb->save.rsp;
    nested_vmcb->save.rax    = vmcb->save.rax;
    nested_vmcb->save.dr7    = vmcb->save.dr7;
    nested_vmcb->save.dr6    = vmcb->save.dr6;
    nested_vmcb->save.cpl    = vmcb->save.cpl;

    nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
    nested_vmcb->control.int_vector        = vmcb->control.int_vector;
    nested_vmcb->control.int_state         = vmcb->control.int_state;
    nested_vmcb->control.exit_code         = vmcb->control.exit_code;
    nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
    nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
    nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
    nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
    nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;

    if (svm->nrips_enabled)
        nested_vmcb->control.next_rip = vmcb->control.next_rip;
    /*
     * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
     * to make sure that we do not lose injected events. So check event_inj
     * here and copy it to exit_int_info if it is valid.
     * exit_int_info and event_inj can't both be valid because the case
     * below only happens on a VMRUN instruction intercept which has
     * no valid exit_int_info set.
     */
2341 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
2342 struct vmcb_control_area *nc = &nested_vmcb->control;
2344 nc->exit_int_info = vmcb->control.event_inj;
2345 nc->exit_int_info_err = vmcb->control.event_inj_err;
2348 nested_vmcb->control.tlb_ctl = 0;
2349 nested_vmcb->control.event_inj = 0;
2350 nested_vmcb->control.event_inj_err = 0;
2352 /* We always set V_INTR_MASKING and remember the old value in hflags */
2353 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
2354 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
2356 /* Restore the original control entries */
2357 copy_vmcb_control_area(vmcb, hsave);
2359 kvm_clear_exception_queue(&svm->vcpu);
2360 kvm_clear_interrupt_queue(&svm->vcpu);
2362 svm->nested.nested_cr3 = 0;
2364 /* Restore selected save entries */
2365 svm->vmcb->save.es = hsave->save.es;
2366 svm->vmcb->save.cs = hsave->save.cs;
2367 svm->vmcb->save.ss = hsave->save.ss;
2368 svm->vmcb->save.ds = hsave->save.ds;
2369 svm->vmcb->save.gdtr = hsave->save.gdtr;
2370 svm->vmcb->save.idtr = hsave->save.idtr;
2371 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
2372 svm_set_efer(&svm->vcpu, hsave->save.efer);
2373 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
2374 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
2376 svm->vmcb->save.cr3 = hsave->save.cr3;
2377 svm->vcpu.arch.cr3 = hsave->save.cr3;
2379 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
2381 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
2382 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
2383 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
2384 svm->vmcb->save.dr7 = 0;
2385 svm->vmcb->save.cpl = 0;
2386 svm->vmcb->control.exit_int_info = 0;
2388 mark_all_dirty(svm->vmcb);
2390 nested_svm_unmap(page);
2392 nested_svm_uninit_mmu_context(&svm->vcpu);
2393 kvm_mmu_reset_context(&svm->vcpu);
2394 kvm_mmu_load(&svm->vcpu);
2399 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
2402 * This function merges the msr permission bitmaps of kvm and the
2403 * nested vmcb. It is optimized in that it only merges the parts where
2404 * the kvm msr permission bitmap may contain zero bits
2408 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
2411 for (i = 0; i < MSRPM_OFFSETS; i++) {
2415 if (msrpm_offsets[i] == 0xffffffff)
2416 break;
2418 p = msrpm_offsets[i];
2419 offset = svm->nested.vmcb_msrpm + (p * 4);
2421 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
2422 return false;
2424 svm->nested.msrpm[p] = svm->msrpm[p] | value;
2427 svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
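/*
 * A rough sketch (not code from this file) of the MSR-number to
 * bitmap-offset mapping that msrpm_offsets[] caches: the permission
 * bitmap spends two bits per MSR (one read, one write intercept)
 * across three ranges of 0x2000 MSRs each, so 16 MSRs share one u32:
 *
 *	static u32 msrpm_offset_sketch(u32 msr)
 *	{
 *		static const u32 base[] = { 0, 0xc0000000, 0xc0010000 };
 *		int i;
 *
 *		for (i = 0; i < 3; i++) {
 *			if (msr < base[i] || msr >= base[i] + 0x2000)
 *				continue;
 *			return i * 0x200 + (msr - base[i]) / 16;
 *		}
 *		return MSR_INVALID;
 *	}
 */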
2432 static bool nested_vmcb_checks(struct vmcb *vmcb)
2434 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
2435 return false;
2437 if (vmcb->control.asid == 0)
2438 return false;
2440 if (vmcb->control.nested_ctl && !npt_enabled)
2441 return false;
2443 return true;
2446 static bool nested_svm_vmrun(struct vcpu_svm *svm)
2448 struct vmcb *nested_vmcb;
2449 struct vmcb *hsave = svm->nested.hsave;
2450 struct vmcb *vmcb = svm->vmcb;
2454 vmcb_gpa = svm->vmcb->save.rax;
2456 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2457 if (!nested_vmcb)
2458 return false;
2460 if (!nested_vmcb_checks(nested_vmcb)) {
2461 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
2462 nested_vmcb->control.exit_code_hi = 0;
2463 nested_vmcb->control.exit_info_1 = 0;
2464 nested_vmcb->control.exit_info_2 = 0;
2466 nested_svm_unmap(page);
2467 return false;
2468 }
2471 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
2472 nested_vmcb->save.rip,
2473 nested_vmcb->control.int_ctl,
2474 nested_vmcb->control.event_inj,
2475 nested_vmcb->control.nested_ctl);
2477 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
2478 nested_vmcb->control.intercept_cr >> 16,
2479 nested_vmcb->control.intercept_exceptions,
2480 nested_vmcb->control.intercept);
2482 /* Clear internal status */
2483 kvm_clear_exception_queue(&svm->vcpu);
2484 kvm_clear_interrupt_queue(&svm->vcpu);
2487 * Save the old vmcb, so we don't need to pick what we save, but can
2488 * restore everything when a VMEXIT occurs
2490 hsave->save.es = vmcb->save.es;
2491 hsave->save.cs = vmcb->save.cs;
2492 hsave->save.ss = vmcb->save.ss;
2493 hsave->save.ds = vmcb->save.ds;
2494 hsave->save.gdtr = vmcb->save.gdtr;
2495 hsave->save.idtr = vmcb->save.idtr;
2496 hsave->save.efer = svm->vcpu.arch.efer;
2497 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
2498 hsave->save.cr4 = svm->vcpu.arch.cr4;
2499 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
2500 hsave->save.rip = kvm_rip_read(&svm->vcpu);
2501 hsave->save.rsp = vmcb->save.rsp;
2502 hsave->save.rax = vmcb->save.rax;
2503 if (npt_enabled)
2504 hsave->save.cr3 = vmcb->save.cr3;
2505 else
2506 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
2508 copy_vmcb_control_area(hsave, vmcb);
2510 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
2511 svm->vcpu.arch.hflags |= HF_HIF_MASK;
2512 else
2513 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
2515 if (nested_vmcb->control.nested_ctl) {
2516 kvm_mmu_unload(&svm->vcpu);
2517 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
2518 nested_svm_init_mmu_context(&svm->vcpu);
2521 /* Load the nested guest state */
2522 svm->vmcb->save.es = nested_vmcb->save.es;
2523 svm->vmcb->save.cs = nested_vmcb->save.cs;
2524 svm->vmcb->save.ss = nested_vmcb->save.ss;
2525 svm->vmcb->save.ds = nested_vmcb->save.ds;
2526 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
2527 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
2528 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
2529 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
2530 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
2531 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
2532 if (npt_enabled) {
2533 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
2534 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
2535 } else
2536 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
2538 /* Guest paging mode is active - reset mmu */
2539 kvm_mmu_reset_context(&svm->vcpu);
2541 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
2542 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
2543 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
2544 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
2546 /* In case we don't even reach vcpu_run, the fields are not updated */
2547 svm->vmcb->save.rax = nested_vmcb->save.rax;
2548 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
2549 svm->vmcb->save.rip = nested_vmcb->save.rip;
2550 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
2551 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
2552 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
2554 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
2555 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
2557 /* cache intercepts */
2558 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
2559 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
2560 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
2561 svm->nested.intercept = nested_vmcb->control.intercept;
2563 svm_flush_tlb(&svm->vcpu);
2564 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
2565 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
2566 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
2567 else
2568 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
2570 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
2571 /* We only want the cr8 intercept bits of the guest */
2572 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
2573 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
2576 /* We don't want to see VMMCALLs from a nested guest */
2577 clr_intercept(svm, INTERCEPT_VMMCALL);
2579 svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
2580 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
2581 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
2582 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
2583 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
2584 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
2586 nested_svm_unmap(page);
2588 /* Enter Guest-Mode */
2589 enter_guest_mode(&svm->vcpu);
2592 * Merge guest and host intercepts - must be called with vcpu in
2593 * guest-mode to take effect here
2595 recalc_intercepts(svm);
2597 svm->nested.vmcb = vmcb_gpa;
2599 enable_gif(svm);
2601 mark_all_dirty(svm->vmcb);
2606 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
2608 to_vmcb->save.fs = from_vmcb->save.fs;
2609 to_vmcb->save.gs = from_vmcb->save.gs;
2610 to_vmcb->save.tr = from_vmcb->save.tr;
2611 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
2612 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
2613 to_vmcb->save.star = from_vmcb->save.star;
2614 to_vmcb->save.lstar = from_vmcb->save.lstar;
2615 to_vmcb->save.cstar = from_vmcb->save.cstar;
2616 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
2617 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
2618 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
2619 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
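/*
 * Direction note: vmload_interception() below calls
 * nested_svm_vmloadsave(nested_vmcb, svm->vmcb), copying this extra
 * state from the guest's VMCB into the one we run with, while
 * vmsave_interception() passes the arguments the other way around;
 * the helper itself is direction-agnostic.
 */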
2622 static int vmload_interception(struct vcpu_svm *svm)
2624 struct vmcb *nested_vmcb;
2627 if (nested_svm_check_permissions(svm))
2628 return 1;
2630 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2631 if (!nested_vmcb)
2632 return 1;
2634 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2635 skip_emulated_instruction(&svm->vcpu);
2637 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2638 nested_svm_unmap(page);
2643 static int vmsave_interception(struct vcpu_svm *svm)
2645 struct vmcb *nested_vmcb;
2648 if (nested_svm_check_permissions(svm))
2649 return 1;
2651 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
2652 if (!nested_vmcb)
2653 return 1;
2655 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2656 skip_emulated_instruction(&svm->vcpu);
2658 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2659 nested_svm_unmap(page);
2664 static int vmrun_interception(struct vcpu_svm *svm)
2666 if (nested_svm_check_permissions(svm))
2667 return 1;
2669 /* Save rip after vmrun instruction */
2670 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
2672 if (!nested_svm_vmrun(svm))
2673 return 1;
2675 if (!nested_svm_vmrun_msrpm(svm))
2676 goto failed;
2678 return 1;
2680 failed:
2682 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
2683 svm->vmcb->control.exit_code_hi = 0;
2684 svm->vmcb->control.exit_info_1 = 0;
2685 svm->vmcb->control.exit_info_2 = 0;
2687 nested_svm_vmexit(svm);
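/*
 * Note the failure path above: if merging the MSR permission bitmap
 * fails, the VMRUN is reflected back to the L1 guest as a #VMEXIT
 * with exit code SVM_EXIT_ERR, so L1 sees a failed VMRUN instead of
 * a wedged vcpu.
 */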
2692 static int stgi_interception(struct vcpu_svm *svm)
2694 if (nested_svm_check_permissions(svm))
2695 return 1;
2697 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2698 skip_emulated_instruction(&svm->vcpu);
2699 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2701 enable_gif(svm);
2706 static int clgi_interception(struct vcpu_svm *svm)
2708 if (nested_svm_check_permissions(svm))
2709 return 1;
2711 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2712 skip_emulated_instruction(&svm->vcpu);
2714 disable_gif(svm);
2716 /* After a CLGI no interrupts should come */
2717 svm_clear_vintr(svm);
2718 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2720 mark_dirty(svm->vmcb, VMCB_INTR);
2725 static int invlpga_interception(struct vcpu_svm *svm)
2727 struct kvm_vcpu *vcpu = &svm->vcpu;
2729 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
2730 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2732 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2733 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2735 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2736 skip_emulated_instruction(&svm->vcpu);
2740 static int skinit_interception(struct vcpu_svm *svm)
2742 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
2744 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2748 static int wbinvd_interception(struct vcpu_svm *svm)
2750 kvm_emulate_wbinvd(&svm->vcpu);
2754 static int xsetbv_interception(struct vcpu_svm *svm)
2756 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
2757 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
2759 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
2760 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
2761 skip_emulated_instruction(&svm->vcpu);
2767 static int task_switch_interception(struct vcpu_svm *svm)
2771 int int_type = svm->vmcb->control.exit_int_info &
2772 SVM_EXITINTINFO_TYPE_MASK;
2773 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2774 uint32_t type =
2775 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2776 uint32_t idt_v =
2777 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2778 bool has_error_code = false;
2781 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2783 if (svm->vmcb->control.exit_info_2 &
2784 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2785 reason = TASK_SWITCH_IRET;
2786 else if (svm->vmcb->control.exit_info_2 &
2787 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2788 reason = TASK_SWITCH_JMP;
2789 else if (idt_v)
2790 reason = TASK_SWITCH_GATE;
2791 else
2792 reason = TASK_SWITCH_CALL;
2794 if (reason == TASK_SWITCH_GATE) {
2795 switch (type) {
2796 case SVM_EXITINTINFO_TYPE_NMI:
2797 svm->vcpu.arch.nmi_injected = false;
2798 break;
2799 case SVM_EXITINTINFO_TYPE_EXEPT:
2800 if (svm->vmcb->control.exit_info_2 &
2801 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2802 has_error_code = true;
2803 error_code =
2804 (u32)svm->vmcb->control.exit_info_2;
2805 }
2806 kvm_clear_exception_queue(&svm->vcpu);
2807 break;
2808 case SVM_EXITINTINFO_TYPE_INTR:
2809 kvm_clear_interrupt_queue(&svm->vcpu);
2810 break;
2811 default:
2812 break;
2813 }
2814 }
2816 if (reason != TASK_SWITCH_GATE ||
2817 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2818 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2819 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
2820 skip_emulated_instruction(&svm->vcpu);
2822 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2823 int_vec = -1;
2825 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
2826 has_error_code, error_code) == EMULATE_FAIL) {
2827 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2828 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2829 svm->vcpu.run->internal.ndata = 0;
2835 static int cpuid_interception(struct vcpu_svm *svm)
2837 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2838 kvm_emulate_cpuid(&svm->vcpu);
2842 static int iret_interception(struct vcpu_svm *svm)
2844 ++svm->vcpu.stat.nmi_window_exits;
2845 clr_intercept(svm, INTERCEPT_IRET);
2846 svm->vcpu.arch.hflags |= HF_IRET_MASK;
2847 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
2848 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
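/*
 * The RIP recorded in nmi_iret_rip is compared against the current
 * RIP in svm_complete_interrupts(): only once the vcpu has made
 * progress past the IRET are HF_NMI_MASK and HF_IRET_MASK dropped
 * and the next NMI allowed in.
 */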
2852 static int invlpg_interception(struct vcpu_svm *svm)
2854 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2855 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2857 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2858 skip_emulated_instruction(&svm->vcpu);
2862 static int emulate_on_interception(struct vcpu_svm *svm)
2864 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
2867 static int rdpmc_interception(struct vcpu_svm *svm)
2871 if (!static_cpu_has(X86_FEATURE_NRIPS))
2872 return emulate_on_interception(svm);
2874 err = kvm_rdpmc(&svm->vcpu);
2875 kvm_complete_insn_gp(&svm->vcpu, err);
2880 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
2883 unsigned long cr0 = svm->vcpu.arch.cr0;
2887 intercept = svm->nested.intercept;
2889 if (!is_guest_mode(&svm->vcpu) ||
2890 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
2891 return false;
2893 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2894 val &= ~SVM_CR0_SELECTIVE_MASK;
2896 if (cr0 ^ val) {
2897 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2898 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2904 #define CR_VALID (1ULL << 63)
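/*
 * With decode assists, exit_info_1 of a CR/DR intercept carries the
 * GPR operand number in its low bits (SVM_EXITINFO_REG_MASK); for
 * MOV-CR intercepts bit 63 (CR_VALID above) signals that this
 * operand information is actually valid.
 */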
2906 static int cr_interception(struct vcpu_svm *svm)
2912 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2913 return emulate_on_interception(svm);
2915 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2916 return emulate_on_interception(svm);
2918 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2919 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2920 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2921 else
2922 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2925 if (cr >= 16) { /* mov to cr */
2926 cr -= 16;
2927 val = kvm_register_read(&svm->vcpu, reg);
2928 switch (cr) {
2929 case 0:
2930 if (!check_selective_cr0_intercepted(svm, val))
2931 err = kvm_set_cr0(&svm->vcpu, val);
2932 else
2933 return 1;
2935 break;
2936 case 3:
2937 err = kvm_set_cr3(&svm->vcpu, val);
2938 break;
2939 case 4:
2940 err = kvm_set_cr4(&svm->vcpu, val);
2941 break;
2942 case 8:
2943 err = kvm_set_cr8(&svm->vcpu, val);
2944 break;
2945 default:
2946 WARN(1, "unhandled write to CR%d", cr);
2947 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2948 return 1;
2949 }
2950 } else { /* mov from cr */
2951 switch (cr) {
2952 case 0:
2953 val = kvm_read_cr0(&svm->vcpu);
2954 break;
2955 case 2:
2956 val = svm->vcpu.arch.cr2;
2957 break;
2958 case 3:
2959 val = kvm_read_cr3(&svm->vcpu);
2960 break;
2961 case 4:
2962 val = kvm_read_cr4(&svm->vcpu);
2963 break;
2964 case 8:
2965 val = kvm_get_cr8(&svm->vcpu);
2966 break;
2967 default:
2968 WARN(1, "unhandled read from CR%d", cr);
2969 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2970 return 1;
2971 }
2972 kvm_register_write(&svm->vcpu, reg, val);
2973 }
2974 kvm_complete_insn_gp(&svm->vcpu, err);
2979 static int dr_interception(struct vcpu_svm *svm)
2984 if (svm->vcpu.guest_debug == 0) {
2986 * No more DR vmexits; force a reload of the debug registers
2987 * and reenter on this instruction. The next vmexit will
2988 * retrieve the full state of the debug registers.
2990 clr_dr_intercepts(svm);
2991 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2993 return 1;
2994 }
2995 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2996 return emulate_on_interception(svm);
2998 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2999 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3001 if (dr >= 16) { /* mov to DRn */
3002 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3003 return 1;
3004 val = kvm_register_read(&svm->vcpu, reg);
3005 kvm_set_dr(&svm->vcpu, dr - 16, val);
3006 } else {
3007 if (!kvm_require_dr(&svm->vcpu, dr))
3008 return 1;
3009 kvm_get_dr(&svm->vcpu, dr, &val);
3010 kvm_register_write(&svm->vcpu, reg, val);
3011 }
3013 skip_emulated_instruction(&svm->vcpu);
3018 static int cr8_write_interception(struct vcpu_svm *svm)
3020 struct kvm_run *kvm_run = svm->vcpu.run;
3023 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3024 /* instruction emulation calls kvm_set_cr8() */
3025 r = cr_interception(svm);
3026 if (lapic_in_kernel(&svm->vcpu))
3027 return r;
3028 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
3029 return r;
3030 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3031 return 0;
3034 static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
3036 struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
3037 return vmcb->control.tsc_offset +
3038 kvm_scale_tsc(vcpu, host_tsc);
3041 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3043 struct vcpu_svm *svm = to_svm(vcpu);
3045 switch (msr_info->index) {
3046 case MSR_IA32_TSC: {
3047 msr_info->data = svm->vmcb->control.tsc_offset +
3048 kvm_scale_tsc(vcpu, rdtsc());
3050 break;
3051 }
3052 case MSR_STAR:
3053 msr_info->data = svm->vmcb->save.star;
3054 break;
3055 #ifdef CONFIG_X86_64
3056 case MSR_LSTAR:
3057 msr_info->data = svm->vmcb->save.lstar;
3058 break;
3059 case MSR_CSTAR:
3060 msr_info->data = svm->vmcb->save.cstar;
3061 break;
3062 case MSR_KERNEL_GS_BASE:
3063 msr_info->data = svm->vmcb->save.kernel_gs_base;
3065 case MSR_SYSCALL_MASK:
3066 msr_info->data = svm->vmcb->save.sfmask;
3069 case MSR_IA32_SYSENTER_CS:
3070 msr_info->data = svm->vmcb->save.sysenter_cs;
3072 case MSR_IA32_SYSENTER_EIP:
3073 msr_info->data = svm->sysenter_eip;
3075 case MSR_IA32_SYSENTER_ESP:
3076 msr_info->data = svm->sysenter_esp;
3079 * Nobody will change the following 5 values in the VMCB so we can
3080 * safely return them on rdmsr. They will always be 0 until LBRV is
3081 * implemented.
3083 case MSR_IA32_DEBUGCTLMSR:
3084 msr_info->data = svm->vmcb->save.dbgctl;
3086 case MSR_IA32_LASTBRANCHFROMIP:
3087 msr_info->data = svm->vmcb->save.br_from;
3089 case MSR_IA32_LASTBRANCHTOIP:
3090 msr_info->data = svm->vmcb->save.br_to;
3092 case MSR_IA32_LASTINTFROMIP:
3093 msr_info->data = svm->vmcb->save.last_excp_from;
3095 case MSR_IA32_LASTINTTOIP:
3096 msr_info->data = svm->vmcb->save.last_excp_to;
3098 case MSR_VM_HSAVE_PA:
3099 msr_info->data = svm->nested.hsave_msr;
3100 break;
3101 case MSR_VM_CR:
3102 msr_info->data = svm->nested.vm_cr_msr;
3103 break;
3104 case MSR_IA32_UCODE_REV:
3105 msr_info->data = 0x01000065;
3106 break;
3107 default:
3108 return kvm_get_msr_common(vcpu, msr_info);
3113 static int rdmsr_interception(struct vcpu_svm *svm)
3115 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3116 struct msr_data msr_info;
3118 msr_info.index = ecx;
3119 msr_info.host_initiated = false;
3120 if (svm_get_msr(&svm->vcpu, &msr_info)) {
3121 trace_kvm_msr_read_ex(ecx);
3122 kvm_inject_gp(&svm->vcpu, 0);
3123 } else {
3124 trace_kvm_msr_read(ecx, msr_info.data);
3126 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3127 msr_info.data & 0xffffffff);
3128 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3129 msr_info.data >> 32);
3130 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3131 skip_emulated_instruction(&svm->vcpu);
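/*
 * svm_set_vm_cr() below emulates the VM_CR lock/disable semantics:
 * once SVM_DIS is set while locked, neither the lock nor the disable
 * bit can be changed anymore, and disabling SVM is refused while the
 * guest still has EFER.SVME set.
 */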
3136 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3138 struct vcpu_svm *svm = to_svm(vcpu);
3139 int svm_dis, chg_mask;
3141 if (data & ~SVM_VM_CR_VALID_MASK)
3142 return 1;
3144 chg_mask = SVM_VM_CR_VALID_MASK;
3146 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3147 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3149 svm->nested.vm_cr_msr &= ~chg_mask;
3150 svm->nested.vm_cr_msr |= (data & chg_mask);
3152 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3154 /* check for svm_disable while efer.svme is set */
3155 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3156 return 1;
3158 return 0;
3161 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
3163 struct vcpu_svm *svm = to_svm(vcpu);
3165 u32 ecx = msr->index;
3166 u64 data = msr->data;
3167 switch (ecx) {
3168 case MSR_IA32_TSC:
3169 kvm_write_tsc(vcpu, msr);
3170 break;
3171 case MSR_STAR:
3172 svm->vmcb->save.star = data;
3173 break;
3174 #ifdef CONFIG_X86_64
3175 case MSR_LSTAR:
3176 svm->vmcb->save.lstar = data;
3177 break;
3178 case MSR_CSTAR:
3179 svm->vmcb->save.cstar = data;
3180 break;
3181 case MSR_KERNEL_GS_BASE:
3182 svm->vmcb->save.kernel_gs_base = data;
3184 case MSR_SYSCALL_MASK:
3185 svm->vmcb->save.sfmask = data;
3188 case MSR_IA32_SYSENTER_CS:
3189 svm->vmcb->save.sysenter_cs = data;
3191 case MSR_IA32_SYSENTER_EIP:
3192 svm->sysenter_eip = data;
3193 svm->vmcb->save.sysenter_eip = data;
3195 case MSR_IA32_SYSENTER_ESP:
3196 svm->sysenter_esp = data;
3197 svm->vmcb->save.sysenter_esp = data;
3199 case MSR_IA32_DEBUGCTLMSR:
3200 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
3201 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3202 __func__, data);
3203 break;
3204 }
3205 if (data & DEBUGCTL_RESERVED_BITS)
3206 return 1;
3208 svm->vmcb->save.dbgctl = data;
3209 mark_dirty(svm->vmcb, VMCB_LBR);
3210 if (data & (1ULL<<0))
3211 svm_enable_lbrv(svm);
3212 else
3213 svm_disable_lbrv(svm);
3215 case MSR_VM_HSAVE_PA:
3216 svm->nested.hsave_msr = data;
3217 break;
3218 case MSR_VM_CR:
3219 return svm_set_vm_cr(vcpu, data);
3221 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3224 return kvm_set_msr_common(vcpu, msr);
3229 static int wrmsr_interception(struct vcpu_svm *svm)
3231 struct msr_data msr;
3232 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3233 u64 data = kvm_read_edx_eax(&svm->vcpu);
3235 msr.data = data;
3236 msr.index = ecx;
3237 msr.host_initiated = false;
3239 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3240 if (kvm_set_msr(&svm->vcpu, &msr)) {
3241 trace_kvm_msr_write_ex(ecx, data);
3242 kvm_inject_gp(&svm->vcpu, 0);
3243 } else {
3244 trace_kvm_msr_write(ecx, data);
3245 skip_emulated_instruction(&svm->vcpu);
3250 static int msr_interception(struct vcpu_svm *svm)
3252 if (svm->vmcb->control.exit_info_1)
3253 return wrmsr_interception(svm);
3254 else
3255 return rdmsr_interception(svm);
3258 static int interrupt_window_interception(struct vcpu_svm *svm)
3260 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3261 svm_clear_vintr(svm);
3262 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3263 mark_dirty(svm->vmcb, VMCB_INTR);
3264 ++svm->vcpu.stat.irq_window_exits;
3268 static int pause_interception(struct vcpu_svm *svm)
3270 kvm_vcpu_on_spin(&(svm->vcpu));
3274 static int nop_interception(struct vcpu_svm *svm)
3276 skip_emulated_instruction(&(svm->vcpu));
3280 static int monitor_interception(struct vcpu_svm *svm)
3282 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
3283 return nop_interception(svm);
3286 static int mwait_interception(struct vcpu_svm *svm)
3288 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
3289 return nop_interception(svm);
3292 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
3293 [SVM_EXIT_READ_CR0] = cr_interception,
3294 [SVM_EXIT_READ_CR3] = cr_interception,
3295 [SVM_EXIT_READ_CR4] = cr_interception,
3296 [SVM_EXIT_READ_CR8] = cr_interception,
3297 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
3298 [SVM_EXIT_WRITE_CR0] = cr_interception,
3299 [SVM_EXIT_WRITE_CR3] = cr_interception,
3300 [SVM_EXIT_WRITE_CR4] = cr_interception,
3301 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
3302 [SVM_EXIT_READ_DR0] = dr_interception,
3303 [SVM_EXIT_READ_DR1] = dr_interception,
3304 [SVM_EXIT_READ_DR2] = dr_interception,
3305 [SVM_EXIT_READ_DR3] = dr_interception,
3306 [SVM_EXIT_READ_DR4] = dr_interception,
3307 [SVM_EXIT_READ_DR5] = dr_interception,
3308 [SVM_EXIT_READ_DR6] = dr_interception,
3309 [SVM_EXIT_READ_DR7] = dr_interception,
3310 [SVM_EXIT_WRITE_DR0] = dr_interception,
3311 [SVM_EXIT_WRITE_DR1] = dr_interception,
3312 [SVM_EXIT_WRITE_DR2] = dr_interception,
3313 [SVM_EXIT_WRITE_DR3] = dr_interception,
3314 [SVM_EXIT_WRITE_DR4] = dr_interception,
3315 [SVM_EXIT_WRITE_DR5] = dr_interception,
3316 [SVM_EXIT_WRITE_DR6] = dr_interception,
3317 [SVM_EXIT_WRITE_DR7] = dr_interception,
3318 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
3319 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
3320 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
3321 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
3322 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
3323 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
3324 [SVM_EXIT_INTR] = intr_interception,
3325 [SVM_EXIT_NMI] = nmi_interception,
3326 [SVM_EXIT_SMI] = nop_on_interception,
3327 [SVM_EXIT_INIT] = nop_on_interception,
3328 [SVM_EXIT_VINTR] = interrupt_window_interception,
3329 [SVM_EXIT_RDPMC] = rdpmc_interception,
3330 [SVM_EXIT_CPUID] = cpuid_interception,
3331 [SVM_EXIT_IRET] = iret_interception,
3332 [SVM_EXIT_INVD] = emulate_on_interception,
3333 [SVM_EXIT_PAUSE] = pause_interception,
3334 [SVM_EXIT_HLT] = halt_interception,
3335 [SVM_EXIT_INVLPG] = invlpg_interception,
3336 [SVM_EXIT_INVLPGA] = invlpga_interception,
3337 [SVM_EXIT_IOIO] = io_interception,
3338 [SVM_EXIT_MSR] = msr_interception,
3339 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
3340 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3341 [SVM_EXIT_VMRUN] = vmrun_interception,
3342 [SVM_EXIT_VMMCALL] = vmmcall_interception,
3343 [SVM_EXIT_VMLOAD] = vmload_interception,
3344 [SVM_EXIT_VMSAVE] = vmsave_interception,
3345 [SVM_EXIT_STGI] = stgi_interception,
3346 [SVM_EXIT_CLGI] = clgi_interception,
3347 [SVM_EXIT_SKINIT] = skinit_interception,
3348 [SVM_EXIT_WBINVD] = wbinvd_interception,
3349 [SVM_EXIT_MONITOR] = monitor_interception,
3350 [SVM_EXIT_MWAIT] = mwait_interception,
3351 [SVM_EXIT_XSETBV] = xsetbv_interception,
3352 [SVM_EXIT_NPF] = pf_interception,
3353 [SVM_EXIT_RSM] = emulate_on_interception,
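/*
 * Dispatch note: handle_exit() further below bounds-checks the exit
 * code against this table and then simply does
 *
 *	return svm_exit_handlers[exit_code](svm);
 *
 * so supporting a new intercept means setting the intercept bit and
 * adding a handler entry here.
 */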
3356 static void dump_vmcb(struct kvm_vcpu *vcpu)
3358 struct vcpu_svm *svm = to_svm(vcpu);
3359 struct vmcb_control_area *control = &svm->vmcb->control;
3360 struct vmcb_save_area *save = &svm->vmcb->save;
3362 pr_err("VMCB Control Area:\n");
3363 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
3364 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
3365 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
3366 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
3367 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
3368 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
3369 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3370 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3371 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3372 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3373 pr_err("%-20s%d\n", "asid:", control->asid);
3374 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3375 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3376 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3377 pr_err("%-20s%08x\n", "int_state:", control->int_state);
3378 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3379 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3380 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3381 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3382 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3383 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3384 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3385 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3386 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3387 pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
3388 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3389 pr_err("VMCB State Save Area:\n");
3390 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3392 save->es.selector, save->es.attrib,
3393 save->es.limit, save->es.base);
3394 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3396 save->cs.selector, save->cs.attrib,
3397 save->cs.limit, save->cs.base);
3398 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3400 save->ss.selector, save->ss.attrib,
3401 save->ss.limit, save->ss.base);
3402 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3404 save->ds.selector, save->ds.attrib,
3405 save->ds.limit, save->ds.base);
3406 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3408 save->fs.selector, save->fs.attrib,
3409 save->fs.limit, save->fs.base);
3410 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3412 save->gs.selector, save->gs.attrib,
3413 save->gs.limit, save->gs.base);
3414 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3416 save->gdtr.selector, save->gdtr.attrib,
3417 save->gdtr.limit, save->gdtr.base);
3418 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3420 save->ldtr.selector, save->ldtr.attrib,
3421 save->ldtr.limit, save->ldtr.base);
3422 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3424 save->idtr.selector, save->idtr.attrib,
3425 save->idtr.limit, save->idtr.base);
3426 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3428 save->tr.selector, save->tr.attrib,
3429 save->tr.limit, save->tr.base);
3430 pr_err("cpl: %d efer: %016llx\n",
3431 save->cpl, save->efer);
3432 pr_err("%-15s %016llx %-13s %016llx\n",
3433 "cr0:", save->cr0, "cr2:", save->cr2);
3434 pr_err("%-15s %016llx %-13s %016llx\n",
3435 "cr3:", save->cr3, "cr4:", save->cr4);
3436 pr_err("%-15s %016llx %-13s %016llx\n",
3437 "dr6:", save->dr6, "dr7:", save->dr7);
3438 pr_err("%-15s %016llx %-13s %016llx\n",
3439 "rip:", save->rip, "rflags:", save->rflags);
3440 pr_err("%-15s %016llx %-13s %016llx\n",
3441 "rsp:", save->rsp, "rax:", save->rax);
3442 pr_err("%-15s %016llx %-13s %016llx\n",
3443 "star:", save->star, "lstar:", save->lstar);
3444 pr_err("%-15s %016llx %-13s %016llx\n",
3445 "cstar:", save->cstar, "sfmask:", save->sfmask);
3446 pr_err("%-15s %016llx %-13s %016llx\n",
3447 "kernel_gs_base:", save->kernel_gs_base,
3448 "sysenter_cs:", save->sysenter_cs);
3449 pr_err("%-15s %016llx %-13s %016llx\n",
3450 "sysenter_esp:", save->sysenter_esp,
3451 "sysenter_eip:", save->sysenter_eip);
3452 pr_err("%-15s %016llx %-13s %016llx\n",
3453 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3454 pr_err("%-15s %016llx %-13s %016llx\n",
3455 "br_from:", save->br_from, "br_to:", save->br_to);
3456 pr_err("%-15s %016llx %-13s %016llx\n",
3457 "excp_from:", save->last_excp_from,
3458 "excp_to:", save->last_excp_to);
3461 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
3463 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3465 *info1 = control->exit_info_1;
3466 *info2 = control->exit_info_2;
3469 static int handle_exit(struct kvm_vcpu *vcpu)
3471 struct vcpu_svm *svm = to_svm(vcpu);
3472 struct kvm_run *kvm_run = vcpu->run;
3473 u32 exit_code = svm->vmcb->control.exit_code;
3475 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3476 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3477 if (npt_enabled)
3478 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3480 if (unlikely(svm->nested.exit_required)) {
3481 nested_svm_vmexit(svm);
3482 svm->nested.exit_required = false;
3484 return 1;
3485 }
3487 if (is_guest_mode(vcpu)) {
3490 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
3491 svm->vmcb->control.exit_info_1,
3492 svm->vmcb->control.exit_info_2,
3493 svm->vmcb->control.exit_int_info,
3494 svm->vmcb->control.exit_int_info_err,
3497 vmexit = nested_svm_exit_special(svm);
3499 if (vmexit == NESTED_EXIT_CONTINUE)
3500 vmexit = nested_svm_exit_handled(svm);
3502 if (vmexit == NESTED_EXIT_DONE)
3503 return 1;
3504 }
3506 svm_complete_interrupts(svm);
3508 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3509 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3510 kvm_run->fail_entry.hardware_entry_failure_reason
3511 = svm->vmcb->control.exit_code;
3512 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
3517 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
3518 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
3519 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
3520 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
3521 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
3522 "exit_code 0x%x\n",
3523 __func__, svm->vmcb->control.exit_int_info,
3524 exit_code);
3526 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3527 || !svm_exit_handlers[exit_code]) {
3528 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
3529 kvm_queue_exception(vcpu, UD_VECTOR);
3530 return 1;
3531 }
3533 return svm_exit_handlers[exit_code](svm);
3536 static void reload_tss(struct kvm_vcpu *vcpu)
3538 int cpu = raw_smp_processor_id();
3540 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3541 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
3542 load_TR_desc();
3545 static void pre_svm_run(struct vcpu_svm *svm)
3547 int cpu = raw_smp_processor_id();
3549 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
3551 /* FIXME: handle wraparound of asid_generation */
3552 if (svm->asid_generation != sd->asid_generation)
3553 new_asid(svm, sd);
3556 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3558 struct vcpu_svm *svm = to_svm(vcpu);
3560 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3561 vcpu->arch.hflags |= HF_NMI_MASK;
3562 set_intercept(svm, INTERCEPT_IRET);
3563 ++vcpu->stat.nmi_injections;
3566 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
3568 struct vmcb_control_area *control;
3570 control = &svm->vmcb->control;
3571 control->int_vector = irq;
3572 control->int_ctl &= ~V_INTR_PRIO_MASK;
3573 control->int_ctl |= V_IRQ_MASK |
3574 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
3575 mark_dirty(svm->vmcb, VMCB_INTR);
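/*
 * The priority field is hard-coded to 0xf (the commented-out
 * expression would derive it from the vector) so the virtual
 * interrupt is never masked by V_TPR; enable_irq_window() relies on
 * this when it injects vector 0x0 merely to obtain a VINTR exit once
 * interrupts become deliverable.
 */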
3578 static void svm_set_irq(struct kvm_vcpu *vcpu)
3580 struct vcpu_svm *svm = to_svm(vcpu);
3582 BUG_ON(!(gif_set(svm)));
3584 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
3585 ++vcpu->stat.irq_injections;
3587 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3588 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
3591 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3593 struct vcpu_svm *svm = to_svm(vcpu);
3595 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3596 return;
3598 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3600 if (irr == -1)
3601 return;
3603 if (tpr >= irr)
3604 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3607 static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
3612 static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
3617 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
3622 static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
3627 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
3629 struct vcpu_svm *svm = to_svm(vcpu);
3630 struct vmcb *vmcb = svm->vmcb;
3632 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
3633 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
3634 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
3639 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3641 struct vcpu_svm *svm = to_svm(vcpu);
3643 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
3646 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3648 struct vcpu_svm *svm = to_svm(vcpu);
3650 if (masked) {
3651 svm->vcpu.arch.hflags |= HF_NMI_MASK;
3652 set_intercept(svm, INTERCEPT_IRET);
3653 } else {
3654 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
3655 clr_intercept(svm, INTERCEPT_IRET);
3656 }
3659 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
3661 struct vcpu_svm *svm = to_svm(vcpu);
3662 struct vmcb *vmcb = svm->vmcb;
3665 if (!gif_set(svm) ||
3666 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
3667 return 0;
3669 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
3671 if (is_guest_mode(vcpu))
3672 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
3677 static void enable_irq_window(struct kvm_vcpu *vcpu)
3679 struct vcpu_svm *svm = to_svm(vcpu);
3682 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3683 * 1, because that's a separate STGI/VMRUN intercept. The next time we
3684 * get that intercept, this function will be called again though and
3685 * we'll get the vintr intercept.
3687 if (gif_set(svm) && nested_svm_intr(svm)) {
3688 svm_set_vintr(svm);
3689 svm_inject_irq(svm, 0x0);
3693 static void enable_nmi_window(struct kvm_vcpu *vcpu)
3695 struct vcpu_svm *svm = to_svm(vcpu);
3697 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
3698 == HF_NMI_MASK)
3699 return; /* IRET will cause a vm exit */
3702 * Something prevents NMI from being injected. Single step over possible
3703 * problem (IRET or exception injection or interrupt shadow)
3705 svm->nmi_singlestep = true;
3706 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3707 update_db_bp_intercept(vcpu);
3710 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
3715 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
3717 struct vcpu_svm *svm = to_svm(vcpu);
3719 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3720 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3721 else
3722 svm->asid_generation--;
3725 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
3729 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3731 struct vcpu_svm *svm = to_svm(vcpu);
3733 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3734 return;
3736 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
3737 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3738 kvm_set_cr8(vcpu, cr8);
3742 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3744 struct vcpu_svm *svm = to_svm(vcpu);
3747 if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
3748 return;
3750 cr8 = kvm_get_cr8(vcpu);
3751 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3752 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3755 static void svm_complete_interrupts(struct vcpu_svm *svm)
3759 u32 exitintinfo = svm->vmcb->control.exit_int_info;
3760 unsigned int3_injected = svm->int3_injected;
3762 svm->int3_injected = 0;
3765 * If we've made progress since setting HF_IRET_MASK, we've
3766 * executed an IRET and can allow NMI injection.
3768 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
3769 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
3770 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3771 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3774 svm->vcpu.arch.nmi_injected = false;
3775 kvm_clear_exception_queue(&svm->vcpu);
3776 kvm_clear_interrupt_queue(&svm->vcpu);
3778 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3779 return;
3781 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3783 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3784 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3786 switch (type) {
3787 case SVM_EXITINTINFO_TYPE_NMI:
3788 svm->vcpu.arch.nmi_injected = true;
3789 break;
3790 case SVM_EXITINTINFO_TYPE_EXEPT:
3792 * In case of software exceptions, do not reinject the vector,
3793 * but re-execute the instruction instead. Rewind RIP first
3794 * if we emulated INT3 before.
3796 if (kvm_exception_is_soft(vector)) {
3797 if (vector == BP_VECTOR && int3_injected &&
3798 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
3799 kvm_rip_write(&svm->vcpu,
3800 kvm_rip_read(&svm->vcpu) -
3801 int3_injected);
3802 break;
3803 }
3804 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3805 u32 err = svm->vmcb->control.exit_int_info_err;
3806 kvm_requeue_exception_e(&svm->vcpu, vector, err);
3808 } else
3809 kvm_requeue_exception(&svm->vcpu, vector);
3810 break;
3811 case SVM_EXITINTINFO_TYPE_INTR:
3812 kvm_queue_interrupt(&svm->vcpu, vector, false);
3813 break;
3814 default:
3815 break;
3816 }
3819 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3821 struct vcpu_svm *svm = to_svm(vcpu);
3822 struct vmcb_control_area *control = &svm->vmcb->control;
3824 control->exit_int_info = control->event_inj;
3825 control->exit_int_info_err = control->event_inj_err;
3826 control->event_inj = 0;
3827 svm_complete_interrupts(svm);
3830 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3832 struct vcpu_svm *svm = to_svm(vcpu);
3834 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3835 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3836 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3839 * A vmexit emulation is required before the vcpu can be executed
3840 * again.
3842 if (unlikely(svm->nested.exit_required))
3843 return;
3845 pre_svm_run(svm);
3847 sync_lapic_to_cr8(vcpu);
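/*
 * Only the general purpose registers have to be shuttled by hand in
 * the asm block below: RAX, RSP and RIP travel through the VMCB
 * (stored above), and the VMLOAD/VMRUN/VMSAVE triple takes care of
 * the remaining hidden segment and MSR state.
 */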
3849 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3851 clgi();
3853 local_irq_enable();
3855 asm volatile (
3856 "push %%" _ASM_BP "; \n\t"
3857 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
3858 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
3859 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
3860 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
3861 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
3862 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
3863 #ifdef CONFIG_X86_64
3864 "mov %c[r8](%[svm]), %%r8 \n\t"
3865 "mov %c[r9](%[svm]), %%r9 \n\t"
3866 "mov %c[r10](%[svm]), %%r10 \n\t"
3867 "mov %c[r11](%[svm]), %%r11 \n\t"
3868 "mov %c[r12](%[svm]), %%r12 \n\t"
3869 "mov %c[r13](%[svm]), %%r13 \n\t"
3870 "mov %c[r14](%[svm]), %%r14 \n\t"
3871 "mov %c[r15](%[svm]), %%r15 \n\t"
3872 #endif
3874 /* Enter guest mode */
3875 "push %%" _ASM_AX " \n\t"
3876 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
3877 __ex(SVM_VMLOAD) "\n\t"
3878 __ex(SVM_VMRUN) "\n\t"
3879 __ex(SVM_VMSAVE) "\n\t"
3880 "pop %%" _ASM_AX " \n\t"
3882 /* Save guest registers, load host registers */
3883 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
3884 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
3885 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
3886 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
3887 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
3888 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
3889 #ifdef CONFIG_X86_64
3890 "mov %%r8, %c[r8](%[svm]) \n\t"
3891 "mov %%r9, %c[r9](%[svm]) \n\t"
3892 "mov %%r10, %c[r10](%[svm]) \n\t"
3893 "mov %%r11, %c[r11](%[svm]) \n\t"
3894 "mov %%r12, %c[r12](%[svm]) \n\t"
3895 "mov %%r13, %c[r13](%[svm]) \n\t"
3896 "mov %%r14, %c[r14](%[svm]) \n\t"
3897 "mov %%r15, %c[r15](%[svm]) \n\t"
3902 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
3903 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
3904 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
3905 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
3906 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
3907 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
3908 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
3909 #ifdef CONFIG_X86_64
3910 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
3911 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
3912 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
3913 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
3914 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
3915 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
3916 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
3917 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
3920 #ifdef CONFIG_X86_64
3921 , "rbx", "rcx", "rdx", "rsi", "rdi"
3922 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
3924 , "ebx", "ecx", "edx", "esi", "edi"
3928 #ifdef CONFIG_X86_64
3929 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
3930 #else
3931 loadsegment(fs, svm->host.fs);
3932 #ifndef CONFIG_X86_32_LAZY_GS
3933 loadsegment(gs, svm->host.gs);
3934 #endif
3935 #endif
3937 reload_tss(vcpu);
3939 local_irq_disable();
3941 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3942 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3943 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3944 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3946 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3948 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3949 kvm_before_handle_nmi(&svm->vcpu);
3951 stgi();
3953 /* Any pending NMI will happen here */
3955 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3956 kvm_after_handle_nmi(&svm->vcpu);
3958 sync_cr8_to_lapic(vcpu);
3962 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3964 /* if exit due to PF check for async PF */
3965 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3966 svm->apf_reason = kvm_read_and_reset_pf_reason();
3968 if (npt_enabled) {
3969 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
3970 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
3971 }
3974 * We need to handle MC intercepts here before the vcpu has a chance to
3975 * change the physical cpu
3977 if (unlikely(svm->vmcb->control.exit_code ==
3978 SVM_EXIT_EXCP_BASE + MC_VECTOR))
3979 svm_handle_mce(svm);
3981 mark_all_clean(svm->vmcb);
3984 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3986 struct vcpu_svm *svm = to_svm(vcpu);
3988 svm->vmcb->save.cr3 = root;
3989 mark_dirty(svm->vmcb, VMCB_CR);
3990 svm_flush_tlb(vcpu);
3993 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
3995 struct vcpu_svm *svm = to_svm(vcpu);
3997 svm->vmcb->control.nested_cr3 = root;
3998 mark_dirty(svm->vmcb, VMCB_NPT);
4000 /* Also sync guest cr3 here in case we live migrate */
4001 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
4002 mark_dirty(svm->vmcb, VMCB_CR);
4004 svm_flush_tlb(vcpu);
4007 static int is_disabled(void)
4011 rdmsrl(MSR_VM_CR, vm_cr);
4012 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
4013 return 1;
4015 return 0;
4018 static void
4019 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4022 * Patch in the VMMCALL instruction:
4024 hypercall[0] = 0x0f;
4025 hypercall[1] = 0x01;
4026 hypercall[2] = 0xd9;
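/* 0x0f 0x01 0xd9 are the three opcode bytes of VMMCALL. */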
4029 static void svm_check_processor_compat(void *rtn)
4034 static bool svm_cpu_has_accelerated_tpr(void)
4039 static bool svm_has_high_real_mode_segbase(void)
4044 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
4049 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
4051 struct vcpu_svm *svm = to_svm(vcpu);
4053 /* Update nrips enabled cache */
4054 svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
4057 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
4059 switch (func) {
4060 case 0x80000001:
4061 if (nested)
4062 entry->ecx |= (1 << 2); /* Set SVM bit */
4063 break;
4064 case 0x8000000A:
4065 entry->eax = 1; /* SVM revision 1 */
4066 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
4067 ASID emulation to nested SVM */
4068 entry->ecx = 0; /* Reserved */
4069 entry->edx = 0; /* Per default do not support any
4070 additional features */
4072 /* Support next_rip if host supports it */
4073 if (boot_cpu_has(X86_FEATURE_NRIPS))
4074 entry->edx |= SVM_FEATURE_NRIP;
4076 /* Support NPT for the guest if enabled */
4077 if (npt_enabled)
4078 entry->edx |= SVM_FEATURE_NPT;
4084 static int svm_get_lpage_level(void)
4086 return PT_PDPE_LEVEL;
4089 static bool svm_rdtscp_supported(void)
4094 static bool svm_invpcid_supported(void)
4099 static bool svm_mpx_supported(void)
4104 static bool svm_xsaves_supported(void)
4109 static bool svm_has_wbinvd_exit(void)
4114 static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
4116 struct vcpu_svm *svm = to_svm(vcpu);
4118 set_exception_intercept(svm, NM_VECTOR);
4119 update_cr0_intercept(svm);
4122 #define PRE_EX(exit) { .exit_code = (exit), \
4123 .stage = X86_ICPT_PRE_EXCEPT, }
4124 #define POST_EX(exit) { .exit_code = (exit), \
4125 .stage = X86_ICPT_POST_EXCEPT, }
4126 #define POST_MEM(exit) { .exit_code = (exit), \
4127 .stage = X86_ICPT_POST_MEMACCESS, }
4129 static const struct __x86_intercept {
4131 enum x86_intercept_stage stage;
4132 } x86_intercept_map[] = {
4133 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
4134 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
4135 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
4136 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
4137 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
4138 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
4139 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
4140 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
4141 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
4142 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
4143 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
4144 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
4145 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
4146 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
4147 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
4148 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
4149 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
4150 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
4151 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
4152 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
4153 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
4154 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
4155 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
4156 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
4157 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
4158 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
4159 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
4160 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
4161 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
4162 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
4163 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
4164 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
4165 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
4166 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
4167 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
4168 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
4169 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
4170 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
4171 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
4172 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
4173 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
4174 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
4175 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
4176 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
4177 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
4178 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
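/*
 * The stage roughly encodes where in instruction emulation the
 * intercept check fires: PRE_EX before the instruction's own
 * exception checks, POST_EX after them, and POST_MEM only once the
 * memory operands have been accessed, mirroring the checkpoints the
 * APM defines for each intercept.
 */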
4185 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4186 struct x86_instruction_info *info,
4187 enum x86_intercept_stage stage)
4189 struct vcpu_svm *svm = to_svm(vcpu);
4190 int vmexit, ret = X86EMUL_CONTINUE;
4191 struct __x86_intercept icpt_info;
4192 struct vmcb *vmcb = svm->vmcb;
4194 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4195 goto out;
4197 icpt_info = x86_intercept_map[info->intercept];
4199 if (stage != icpt_info.stage)
4200 goto out;
4202 switch (icpt_info.exit_code) {
4203 case SVM_EXIT_READ_CR0:
4204 if (info->intercept == x86_intercept_cr_read)
4205 icpt_info.exit_code += info->modrm_reg;
4207 case SVM_EXIT_WRITE_CR0: {
4208 unsigned long cr0, val;
4211 if (info->intercept == x86_intercept_cr_write)
4212 icpt_info.exit_code += info->modrm_reg;
4214 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4215 info->intercept == x86_intercept_clts)
4216 break;
4218 intercept = svm->nested.intercept;
4220 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
4221 break;
4223 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4224 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
4226 if (info->intercept == x86_intercept_lmsw) {
4227 cr0 &= 0xfUL;
4228 val &= 0xfUL;
4229 /* lmsw can't clear PE - catch this here */
4230 if (cr0 & X86_CR0_PE)
4231 val |= X86_CR0_PE;
4232 }
4234 if (cr0 ^ val)
4235 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4239 case SVM_EXIT_READ_DR0:
4240 case SVM_EXIT_WRITE_DR0:
4241 icpt_info.exit_code += info->modrm_reg;
4242 break;
4243 case SVM_EXIT_MSR:
4244 if (info->intercept == x86_intercept_wrmsr)
4245 vmcb->control.exit_info_1 = 1;
4246 else
4247 vmcb->control.exit_info_1 = 0;
4248 break;
4249 case SVM_EXIT_PAUSE:
4251 * The emulator reports this intercept for NOP, but PAUSE is
4252 * encoded as REP NOP, so check for the REP prefix here
4254 if (info->rep_prefix != REPE_PREFIX)
4255 goto out;
4256 case SVM_EXIT_IOIO: {
4260 if (info->intercept == x86_intercept_in ||
4261 info->intercept == x86_intercept_ins) {
4262 exit_info = ((info->src_val & 0xffff) << 16) |
4263 SVM_IOIO_TYPE_MASK;
4264 bytes = info->dst_bytes;
4265 } else {
4266 exit_info = (info->dst_val & 0xffff) << 16;
4267 bytes = info->src_bytes;
4268 }
4270 if (info->intercept == x86_intercept_outs ||
4271 info->intercept == x86_intercept_ins)
4272 exit_info |= SVM_IOIO_STR_MASK;
4274 if (info->rep_prefix)
4275 exit_info |= SVM_IOIO_REP_MASK;
4277 bytes = min(bytes, 4u);
4279 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4281 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4283 vmcb->control.exit_info_1 = exit_info;
4284 vmcb->control.exit_info_2 = info->next_rip;
4292 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4293 if (static_cpu_has(X86_FEATURE_NRIPS))
4294 vmcb->control.next_rip = info->next_rip;
4295 vmcb->control.exit_code = icpt_info.exit_code;
4296 vmexit = nested_svm_exit_handled(svm);
4298 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4299 : X86EMUL_CONTINUE;
4301 out:
4302 return ret;
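/*
 * In effect, svm_check_intercept() reconstructs the exit state (exit
 * code, exit_info_1/2, next_rip) that hardware would have produced
 * for the emulated instruction, then reuses nested_svm_exit_handled()
 * to decide whether the nested guest's intercepts demand a #VMEXIT
 * instead of continued emulation.
 */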
4305 static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
4310 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4314 static struct kvm_x86_ops svm_x86_ops = {
4315 .cpu_has_kvm_support = has_svm,
4316 .disabled_by_bios = is_disabled,
4317 .hardware_setup = svm_hardware_setup,
4318 .hardware_unsetup = svm_hardware_unsetup,
4319 .check_processor_compatibility = svm_check_processor_compat,
4320 .hardware_enable = svm_hardware_enable,
4321 .hardware_disable = svm_hardware_disable,
4322 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
4323 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
4325 .vcpu_create = svm_create_vcpu,
4326 .vcpu_free = svm_free_vcpu,
4327 .vcpu_reset = svm_vcpu_reset,
4329 .prepare_guest_switch = svm_prepare_guest_switch,
4330 .vcpu_load = svm_vcpu_load,
4331 .vcpu_put = svm_vcpu_put,
4333 .update_db_bp_intercept = update_db_bp_intercept,
4334 .get_msr = svm_get_msr,
4335 .set_msr = svm_set_msr,
4336 .get_segment_base = svm_get_segment_base,
4337 .get_segment = svm_get_segment,
4338 .set_segment = svm_set_segment,
4339 .get_cpl = svm_get_cpl,
4340 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
4341 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
4342 .decache_cr3 = svm_decache_cr3,
4343 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
4344 .set_cr0 = svm_set_cr0,
4345 .set_cr3 = svm_set_cr3,
4346 .set_cr4 = svm_set_cr4,
4347 .set_efer = svm_set_efer,
4348 .get_idt = svm_get_idt,
4349 .set_idt = svm_set_idt,
4350 .get_gdt = svm_get_gdt,
4351 .set_gdt = svm_set_gdt,
4352 .get_dr6 = svm_get_dr6,
4353 .set_dr6 = svm_set_dr6,
4354 .set_dr7 = svm_set_dr7,
4355 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4356 .cache_reg = svm_cache_reg,
4357 .get_rflags = svm_get_rflags,
4358 .set_rflags = svm_set_rflags,
4359 .fpu_activate = svm_fpu_activate,
4360 .fpu_deactivate = svm_fpu_deactivate,
4362 .tlb_flush = svm_flush_tlb,
4364 .run = svm_vcpu_run,
4365 .handle_exit = handle_exit,
4366 .skip_emulated_instruction = skip_emulated_instruction,
4367 .set_interrupt_shadow = svm_set_interrupt_shadow,
4368 .get_interrupt_shadow = svm_get_interrupt_shadow,
4369 .patch_hypercall = svm_patch_hypercall,
4370 .set_irq = svm_set_irq,
4371 .set_nmi = svm_inject_nmi,
4372 .queue_exception = svm_queue_exception,
4373 .cancel_injection = svm_cancel_injection,
4374 .interrupt_allowed = svm_interrupt_allowed,
4375 .nmi_allowed = svm_nmi_allowed,
4376 .get_nmi_mask = svm_get_nmi_mask,
4377 .set_nmi_mask = svm_set_nmi_mask,
4378 .enable_nmi_window = enable_nmi_window,
4379 .enable_irq_window = enable_irq_window,
4380 .update_cr8_intercept = update_cr8_intercept,
4381 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
4382 .cpu_uses_apicv = svm_cpu_uses_apicv,
4383 .load_eoi_exitmap = svm_load_eoi_exitmap,
4384 .sync_pir_to_irr = svm_sync_pir_to_irr,
4386 .set_tss_addr = svm_set_tss_addr,
4387 .get_tdp_level = get_npt_level,
4388 .get_mt_mask = svm_get_mt_mask,
4390 .get_exit_info = svm_get_exit_info,
4392 .get_lpage_level = svm_get_lpage_level,
4394 .cpuid_update = svm_cpuid_update,
4396 .rdtscp_supported = svm_rdtscp_supported,
4397 .invpcid_supported = svm_invpcid_supported,
4398 .mpx_supported = svm_mpx_supported,
4399 .xsaves_supported = svm_xsaves_supported,
4401 .set_supported_cpuid = svm_set_supported_cpuid,
4403 .has_wbinvd_exit = svm_has_wbinvd_exit,
4405 .set_tsc_khz = svm_set_tsc_khz,
4406 .read_tsc_offset = svm_read_tsc_offset,
4407 .write_tsc_offset = svm_write_tsc_offset,
4408 .adjust_tsc_offset = svm_adjust_tsc_offset,
4409 .compute_tsc_offset = svm_compute_tsc_offset,
4410 .read_l1_tsc = svm_read_l1_tsc,
4412 .set_tdp_cr3 = set_tdp_cr3,
4414 .check_intercept = svm_check_intercept,
4415 .handle_external_intr = svm_handle_external_intr,
4417 .sched_in = svm_sched_in,
4419 .pmu_ops = &amd_pmu_ops,
4422 static int __init svm_init(void)
4424 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
4425 __alignof__(struct vcpu_svm), THIS_MODULE);
4428 static void __exit svm_exit(void)
4430 kvm_exit();
4433 module_init(svm_init)
4434 module_exit(svm_exit)