arch/x86/kvm/svm.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM support
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *   Avi Kivity   <avi@qumranet.com>
13  */
14
15 #define pr_fmt(fmt) "SVM: " fmt
16
17 #include <linux/kvm_host.h>
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22 #include "x86.h"
23 #include "cpuid.h"
24 #include "pmu.h"
25
26 #include <linux/module.h>
27 #include <linux/mod_devicetable.h>
28 #include <linux/kernel.h>
29 #include <linux/vmalloc.h>
30 #include <linux/highmem.h>
31 #include <linux/sched.h>
32 #include <linux/trace_events.h>
33 #include <linux/slab.h>
34 #include <linux/amd-iommu.h>
35 #include <linux/hashtable.h>
36 #include <linux/frame.h>
37 #include <linux/psp-sev.h>
38 #include <linux/file.h>
39 #include <linux/pagemap.h>
40 #include <linux/swap.h>
41
42 #include <asm/apic.h>
43 #include <asm/perf_event.h>
44 #include <asm/tlbflush.h>
45 #include <asm/desc.h>
46 #include <asm/debugreg.h>
47 #include <asm/kvm_para.h>
48 #include <asm/irq_remapping.h>
49 #include <asm/spec-ctrl.h>
50
51 #include <asm/virtext.h>
52 #include "trace.h"
53
54 #define __ex(x) __kvm_handle_fault_on_reboot(x)
55
56 MODULE_AUTHOR("Qumranet");
57 MODULE_LICENSE("GPL");
58
59 static const struct x86_cpu_id svm_cpu_id[] = {
60         X86_FEATURE_MATCH(X86_FEATURE_SVM),
61         {}
62 };
63 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
64
65 #define IOPM_ALLOC_ORDER 2
66 #define MSRPM_ALLOC_ORDER 1
67
68 #define SEG_TYPE_LDT 2
69 #define SEG_TYPE_BUSY_TSS16 3
70
71 #define SVM_FEATURE_LBRV           (1 <<  1)
72 #define SVM_FEATURE_SVML           (1 <<  2)
73 #define SVM_FEATURE_TSC_RATE       (1 <<  4)
74 #define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
75 #define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
76 #define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
77 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
78
79 #define SVM_AVIC_DOORBELL       0xc001011b
80
81 #define NESTED_EXIT_HOST        0       /* Exit handled on host level */
82 #define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
83 #define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */
84
85 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
86
87 #define TSC_RATIO_RSVD          0xffffff0000000000ULL
88 #define TSC_RATIO_MIN           0x0000000000000001ULL
89 #define TSC_RATIO_MAX           0x000000ffffffffffULL
90
91 #define AVIC_HPA_MASK   ~((0xFFFULL << 52) | 0xFFF)
92
93 /*
94  * 0xff is broadcast, so the max index allowed for physical APIC ID
95  * table is 0xfe.  APIC IDs above 0xff are reserved.
96  */
97 #define AVIC_MAX_PHYSICAL_ID_COUNT      255
98
99 #define AVIC_UNACCEL_ACCESS_WRITE_MASK          1
100 #define AVIC_UNACCEL_ACCESS_OFFSET_MASK         0xFF0
101 #define AVIC_UNACCEL_ACCESS_VECTOR_MASK         0xFFFFFFFF
102
103 /* AVIC GATAG is encoded using VM and VCPU IDs */
104 #define AVIC_VCPU_ID_BITS               8
105 #define AVIC_VCPU_ID_MASK               ((1 << AVIC_VCPU_ID_BITS) - 1)
106
107 #define AVIC_VM_ID_BITS                 24
108 #define AVIC_VM_ID_NR                   (1 << AVIC_VM_ID_BITS)
109 #define AVIC_VM_ID_MASK                 ((1 << AVIC_VM_ID_BITS) - 1)
110
111 #define AVIC_GATAG(x, y)                (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
112                                                 (y & AVIC_VCPU_ID_MASK))
113 #define AVIC_GATAG_TO_VMID(x)           ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
114 #define AVIC_GATAG_TO_VCPUID(x)         (x & AVIC_VCPU_ID_MASK)
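/*
 * Worked example (illustrative values, not from real hardware): for
 * vm_id = 0x123456 and vcpu_id = 0x07,
 *   AVIC_GATAG(0x123456, 0x07)       == 0x12345607
 *   AVIC_GATAG_TO_VMID(0x12345607)   == 0x123456
 *   AVIC_GATAG_TO_VCPUID(0x12345607) == 0x07
 * i.e. the VM ID occupies bits 31:8 of the tag and the vCPU ID bits 7:0.
 */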
115
116 static bool erratum_383_found __read_mostly;
117
118 static const u32 host_save_user_msrs[] = {
119 #ifdef CONFIG_X86_64
120         MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
121         MSR_FS_BASE,
122 #endif
123         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
124         MSR_TSC_AUX,
125 };
126
127 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
128
129 struct kvm_sev_info {
130         bool active;            /* SEV enabled guest */
131         unsigned int asid;      /* ASID used for this guest */
132         unsigned int handle;    /* SEV firmware handle */
133         int fd;                 /* SEV device fd */
134         unsigned long pages_locked; /* Number of pages locked */
135         struct list_head regions_list;  /* List of registered regions */
136 };
137
138 struct kvm_svm {
139         struct kvm kvm;
140
141         /* Struct members for AVIC */
142         u32 avic_vm_id;
143         struct page *avic_logical_id_table_page;
144         struct page *avic_physical_id_table_page;
145         struct hlist_node hnode;
146
147         struct kvm_sev_info sev_info;
148 };
149
150 struct kvm_vcpu;
151
152 struct nested_state {
153         struct vmcb *hsave;
154         u64 hsave_msr;
155         u64 vm_cr_msr;
156         u64 vmcb;
157
158         /* These are the merged vectors */
159         u32 *msrpm;
160
161         /* gpa pointers to the real vectors */
162         u64 vmcb_msrpm;
163         u64 vmcb_iopm;
164
165         /* A VMEXIT is required but not yet emulated */
166         bool exit_required;
167
168         /* cache for intercepts of the guest */
169         u32 intercept_cr;
170         u32 intercept_dr;
171         u32 intercept_exceptions;
172         u64 intercept;
173
174         /* Nested Paging related state */
175         u64 nested_cr3;
176 };
177
178 #define MSRPM_OFFSETS   16
179 static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
180
181 /*
182  * Set osvw_len to a higher value when updated Revision Guides
183  * are published and we know what the new status bits are
184  */
185 static uint64_t osvw_len = 4, osvw_status;
186
187 struct vcpu_svm {
188         struct kvm_vcpu vcpu;
189         struct vmcb *vmcb;
190         unsigned long vmcb_pa;
191         struct svm_cpu_data *svm_data;
192         uint64_t asid_generation;
193         uint64_t sysenter_esp;
194         uint64_t sysenter_eip;
195         uint64_t tsc_aux;
196
197         u64 msr_decfg;
198
199         u64 next_rip;
200
201         u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
202         struct {
203                 u16 fs;
204                 u16 gs;
205                 u16 ldt;
206                 u64 gs_base;
207         } host;
208
209         u64 spec_ctrl;
210         /*
211          * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
212          * translated into the appropriate L2_CFG bits on the host to
213          * perform speculative control.
214          */
215         u64 virt_spec_ctrl;
216
217         u32 *msrpm;
218
219         ulong nmi_iret_rip;
220
221         struct nested_state nested;
222
223         bool nmi_singlestep;
224         u64 nmi_singlestep_guest_rflags;
225
226         unsigned int3_injected;
227         unsigned long int3_rip;
228
229         /* cached guest cpuid flags for faster access */
230         bool nrips_enabled      : 1;
231
232         u32 ldr_reg;
233         u32 dfr_reg;
234         struct page *avic_backing_page;
235         u64 *avic_physical_id_cache;
236         bool avic_is_running;
237
238         /*
239          * Per-vcpu list of struct amd_svm_iommu_ir:
240          * This is used mainly to store interrupt remapping information used
241  * when updating the vcpu affinity. This avoids the need to scan for
242          * IRTE and try to match ga_tag in the IOMMU driver.
243          */
244         struct list_head ir_list;
245         spinlock_t ir_list_lock;
246
247         /* which host CPU was used for running this vcpu */
248         unsigned int last_cpu;
249 };
250
251 /*
252  * This is a wrapper of struct amd_iommu_ir_data.
253  */
254 struct amd_svm_iommu_ir {
255         struct list_head node;  /* Used by SVM for per-vcpu ir_list */
256         void *data;             /* Storing pointer to struct amd_ir_data */
257 };
258
259 #define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK    (0xFF)
260 #define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                 31
261 #define AVIC_LOGICAL_ID_ENTRY_VALID_MASK                (1 << 31)
262
263 #define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK    (0xFFULL)
264 #define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK        (0xFFFFFFFFFFULL << 12)
265 #define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK          (1ULL << 62)
266 #define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK               (1ULL << 63)
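/*
 * Illustrative layout of one physical APIC ID table entry built from the
 * masks above (hypothetical values): a valid entry for a running vCPU on
 * host APIC ID 5 with its vAPIC backing page at host physical address hpa
 * would be
 *   (hpa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) | 5 |
 *   AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK | AVIC_PHYSICAL_ID_ENTRY_VALID_MASK
 */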
267
268 static DEFINE_PER_CPU(u64, current_tsc_ratio);
269 #define TSC_RATIO_DEFAULT       0x0100000000ULL
270
271 #define MSR_INVALID                     0xffffffffU
272
273 static const struct svm_direct_access_msrs {
274         u32 index;   /* Index of the MSR */
275         bool always; /* True if intercept is always on */
276 } direct_access_msrs[] = {
277         { .index = MSR_STAR,                            .always = true  },
278         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
279 #ifdef CONFIG_X86_64
280         { .index = MSR_GS_BASE,                         .always = true  },
281         { .index = MSR_FS_BASE,                         .always = true  },
282         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
283         { .index = MSR_LSTAR,                           .always = true  },
284         { .index = MSR_CSTAR,                           .always = true  },
285         { .index = MSR_SYSCALL_MASK,                    .always = true  },
286 #endif
287         { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
288         { .index = MSR_IA32_PRED_CMD,                   .always = false },
289         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
290         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
291         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
292         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
293         { .index = MSR_INVALID,                         .always = false },
294 };
295
296 /* enable NPT for AMD64 and X86 with PAE */
297 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
298 static bool npt_enabled = true;
299 #else
300 static bool npt_enabled;
301 #endif
302
303 /*
304  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
305  * pause_filter_count: On processors that support Pause filtering (indicated
306  *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
307  *      count value. On VMRUN this value is loaded into an internal counter.
308  *      Each time a pause instruction is executed, this counter is decremented
309  *      until it reaches zero at which time a #VMEXIT is generated if pause
310  *      intercept is enabled. Refer to  AMD APM Vol 2 Section 15.14.4 Pause
311  *      Intercept Filtering for more details.
312  *      This also indicates whether PLE logic is enabled.
313  *
314  * pause_filter_thresh: In addition, some processor families support advanced
315  *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an upper
316  *      bound on the amount of time a guest is allowed to execute in a pause loop.
317  *      In this mode, a 16-bit pause filter threshold field is added in the
318  *      VMCB. The threshold value is a cycle count that is used to reset the
319  *      pause counter. As with simple pause filtering, VMRUN loads the pause
320  *      count value from VMCB into an internal counter. Then, on each pause
321  *      instruction the hardware checks the elapsed number of cycles since
322  *      the most recent pause instruction against the pause filter threshold.
323  *      If the elapsed cycle count is greater than the pause filter threshold,
324  *      then the internal pause count is reloaded from the VMCB and execution
325  *      continues. If the elapsed cycle count is less than the pause filter
326  *      threshold, then the internal pause count is decremented. If the count
327  *      value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
328  *      triggered. If advanced pause filtering is supported and pause filter
329  *      threshold field is set to zero, the filter will operate in the simpler,
330  *      count only mode.
331  */
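/*
 * Illustrative example (the threshold value is made up, not a hardware or
 * module default): with pause_filter_thresh = 1000 cycles, a PAUSE executed
 * more than 1000 cycles after the previous one reloads the internal counter
 * from the VMCB, while a tight loop of PAUSEs spaced less than 1000 cycles
 * apart runs the counter down until, with the PAUSE intercept enabled, a
 * #VMEXIT is triggered.
 */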
332
333 static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
334 module_param(pause_filter_thresh, ushort, 0444);
335
336 static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
337 module_param(pause_filter_count, ushort, 0444);
338
339 /* Default doubles per-vcpu window every exit. */
340 static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
341 module_param(pause_filter_count_grow, ushort, 0444);
342
343 /* Default resets per-vcpu window every exit to pause_filter_count. */
344 static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
345 module_param(pause_filter_count_shrink, ushort, 0444);
346
347 /* Default is to compute the maximum so we can never overflow. */
348 static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
349 module_param(pause_filter_count_max, ushort, 0444);
350
351 /* allow nested paging (virtualized MMU) for all guests */
352 static int npt = true;
353 module_param(npt, int, S_IRUGO);
354
355 /* allow nested virtualization in KVM/SVM */
356 static int nested = true;
357 module_param(nested, int, S_IRUGO);
358
359 /* enable / disable AVIC */
360 static int avic;
361 #ifdef CONFIG_X86_LOCAL_APIC
362 module_param(avic, int, S_IRUGO);
363 #endif
364
365 /* enable/disable Next RIP Save */
366 static int nrips = true;
367 module_param(nrips, int, 0444);
368
369 /* enable/disable Virtual VMLOAD VMSAVE */
370 static int vls = true;
371 module_param(vls, int, 0444);
372
373 /* enable/disable Virtual GIF */
374 static int vgif = true;
375 module_param(vgif, int, 0444);
376
377 /* enable/disable SEV support */
378 static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
379 module_param(sev, int, 0444);
380
381 static bool __read_mostly dump_invalid_vmcb = 0;
382 module_param(dump_invalid_vmcb, bool, 0644);
383
384 static u8 rsm_ins_bytes[] = "\x0f\xaa";
385
386 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
387 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
388 static void svm_complete_interrupts(struct vcpu_svm *svm);
389
390 static int nested_svm_exit_handled(struct vcpu_svm *svm);
391 static int nested_svm_intercept(struct vcpu_svm *svm);
392 static int nested_svm_vmexit(struct vcpu_svm *svm);
393 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
394                                       bool has_error_code, u32 error_code);
395
396 enum {
397         VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
398                             pause filter count */
399         VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
400         VMCB_ASID,       /* ASID */
401         VMCB_INTR,       /* int_ctl, int_vector */
402         VMCB_NPT,        /* npt_en, nCR3, gPAT */
403         VMCB_CR,         /* CR0, CR3, CR4, EFER */
404         VMCB_DR,         /* DR6, DR7 */
405         VMCB_DT,         /* GDT, IDT */
406         VMCB_SEG,        /* CS, DS, SS, ES, CPL */
407         VMCB_CR2,        /* CR2 only */
408         VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
409         VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
410                           * AVIC PHYSICAL_TABLE pointer,
411                           * AVIC LOGICAL_TABLE pointer
412                           */
413         VMCB_DIRTY_MAX,
414 };
415
416 /* TPR and CR2 are always written before VMRUN */
417 #define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))
418
419 #define VMCB_AVIC_APIC_BAR_MASK         0xFFFFFFFFFF000ULL
420
421 static unsigned int max_sev_asid;
422 static unsigned int min_sev_asid;
423 static unsigned long *sev_asid_bitmap;
424 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
425
426 struct enc_region {
427         struct list_head list;
428         unsigned long npages;
429         struct page **pages;
430         unsigned long uaddr;
431         unsigned long size;
432 };
433
434
435 static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
436 {
437         return container_of(kvm, struct kvm_svm, kvm);
438 }
439
440 static inline bool svm_sev_enabled(void)
441 {
442         return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
443 }
444
445 static inline bool sev_guest(struct kvm *kvm)
446 {
447 #ifdef CONFIG_KVM_AMD_SEV
448         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
449
450         return sev->active;
451 #else
452         return false;
453 #endif
454 }
455
456 static inline int sev_get_asid(struct kvm *kvm)
457 {
458         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
459
460         return sev->asid;
461 }
462
463 static inline void mark_all_dirty(struct vmcb *vmcb)
464 {
465         vmcb->control.clean = 0;
466 }
467
468 static inline void mark_all_clean(struct vmcb *vmcb)
469 {
470         vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
471                                & ~VMCB_ALWAYS_DIRTY_MASK;
472 }
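/*
 * Worked example, derived from the enum above: VMCB_DIRTY_MAX is 12, so
 * mark_all_clean() sets clean to ((1 << 12) - 1) & ~((1 << VMCB_INTR) |
 * (1 << VMCB_CR2)) = 0xfff & ~0x208 = 0xdf7, i.e. every field is marked
 * clean except int_ctl/int_vector and CR2, which are always written before
 * VMRUN and therefore always treated as dirty.
 */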
473
474 static inline void mark_dirty(struct vmcb *vmcb, int bit)
475 {
476         vmcb->control.clean &= ~(1 << bit);
477 }
478
479 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
480 {
481         return container_of(vcpu, struct vcpu_svm, vcpu);
482 }
483
484 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
485 {
486         svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
487         mark_dirty(svm->vmcb, VMCB_AVIC);
488 }
489
490 static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
491 {
492         struct vcpu_svm *svm = to_svm(vcpu);
493         u64 *entry = svm->avic_physical_id_cache;
494
495         if (!entry)
496                 return false;
497
498         return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
499 }
500
501 static void recalc_intercepts(struct vcpu_svm *svm)
502 {
503         struct vmcb_control_area *c, *h;
504         struct nested_state *g;
505
506         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
507
508         if (!is_guest_mode(&svm->vcpu))
509                 return;
510
511         c = &svm->vmcb->control;
512         h = &svm->nested.hsave->control;
513         g = &svm->nested;
514
515         c->intercept_cr = h->intercept_cr | g->intercept_cr;
516         c->intercept_dr = h->intercept_dr | g->intercept_dr;
517         c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
518         c->intercept = h->intercept | g->intercept;
519 }
520
521 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
522 {
523         if (is_guest_mode(&svm->vcpu))
524                 return svm->nested.hsave;
525         else
526                 return svm->vmcb;
527 }
528
529 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
530 {
531         struct vmcb *vmcb = get_host_vmcb(svm);
532
533         vmcb->control.intercept_cr |= (1U << bit);
534
535         recalc_intercepts(svm);
536 }
537
538 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
539 {
540         struct vmcb *vmcb = get_host_vmcb(svm);
541
542         vmcb->control.intercept_cr &= ~(1U << bit);
543
544         recalc_intercepts(svm);
545 }
546
547 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
548 {
549         struct vmcb *vmcb = get_host_vmcb(svm);
550
551         return vmcb->control.intercept_cr & (1U << bit);
552 }
553
554 static inline void set_dr_intercepts(struct vcpu_svm *svm)
555 {
556         struct vmcb *vmcb = get_host_vmcb(svm);
557
558         vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
559                 | (1 << INTERCEPT_DR1_READ)
560                 | (1 << INTERCEPT_DR2_READ)
561                 | (1 << INTERCEPT_DR3_READ)
562                 | (1 << INTERCEPT_DR4_READ)
563                 | (1 << INTERCEPT_DR5_READ)
564                 | (1 << INTERCEPT_DR6_READ)
565                 | (1 << INTERCEPT_DR7_READ)
566                 | (1 << INTERCEPT_DR0_WRITE)
567                 | (1 << INTERCEPT_DR1_WRITE)
568                 | (1 << INTERCEPT_DR2_WRITE)
569                 | (1 << INTERCEPT_DR3_WRITE)
570                 | (1 << INTERCEPT_DR4_WRITE)
571                 | (1 << INTERCEPT_DR5_WRITE)
572                 | (1 << INTERCEPT_DR6_WRITE)
573                 | (1 << INTERCEPT_DR7_WRITE);
574
575         recalc_intercepts(svm);
576 }
577
578 static inline void clr_dr_intercepts(struct vcpu_svm *svm)
579 {
580         struct vmcb *vmcb = get_host_vmcb(svm);
581
582         vmcb->control.intercept_dr = 0;
583
584         recalc_intercepts(svm);
585 }
586
587 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
588 {
589         struct vmcb *vmcb = get_host_vmcb(svm);
590
591         vmcb->control.intercept_exceptions |= (1U << bit);
592
593         recalc_intercepts(svm);
594 }
595
596 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
597 {
598         struct vmcb *vmcb = get_host_vmcb(svm);
599
600         vmcb->control.intercept_exceptions &= ~(1U << bit);
601
602         recalc_intercepts(svm);
603 }
604
605 static inline void set_intercept(struct vcpu_svm *svm, int bit)
606 {
607         struct vmcb *vmcb = get_host_vmcb(svm);
608
609         vmcb->control.intercept |= (1ULL << bit);
610
611         recalc_intercepts(svm);
612 }
613
614 static inline void clr_intercept(struct vcpu_svm *svm, int bit)
615 {
616         struct vmcb *vmcb = get_host_vmcb(svm);
617
618         vmcb->control.intercept &= ~(1ULL << bit);
619
620         recalc_intercepts(svm);
621 }
622
623 static inline bool vgif_enabled(struct vcpu_svm *svm)
624 {
625         return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
626 }
627
628 static inline void enable_gif(struct vcpu_svm *svm)
629 {
630         if (vgif_enabled(svm))
631                 svm->vmcb->control.int_ctl |= V_GIF_MASK;
632         else
633                 svm->vcpu.arch.hflags |= HF_GIF_MASK;
634 }
635
636 static inline void disable_gif(struct vcpu_svm *svm)
637 {
638         if (vgif_enabled(svm))
639                 svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
640         else
641                 svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
642 }
643
644 static inline bool gif_set(struct vcpu_svm *svm)
645 {
646         if (vgif_enabled(svm))
647                 return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
648         else
649                 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
650 }
651
652 static unsigned long iopm_base;
653
654 struct kvm_ldttss_desc {
655         u16 limit0;
656         u16 base0;
657         unsigned base1:8, type:5, dpl:2, p:1;
658         unsigned limit1:4, zero0:3, g:1, base2:8;
659         u32 base3;
660         u32 zero1;
661 } __attribute__((packed));
662
663 struct svm_cpu_data {
664         int cpu;
665
666         u64 asid_generation;
667         u32 max_asid;
668         u32 next_asid;
669         u32 min_asid;
670         struct kvm_ldttss_desc *tss_desc;
671
672         struct page *save_area;
673         struct vmcb *current_vmcb;
674
675         /* index = sev_asid, value = vmcb pointer */
676         struct vmcb **sev_vmcbs;
677 };
678
679 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
680
681 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
682
683 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
684 #define MSRS_RANGE_SIZE 2048
685 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
686
687 static u32 svm_msrpm_offset(u32 msr)
688 {
689         u32 offset;
690         int i;
691
692         for (i = 0; i < NUM_MSR_MAPS; i++) {
693                 if (msr < msrpm_ranges[i] ||
694                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
695                         continue;
696
697                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
698                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
699
700                 /* Now we have the u8 offset - but need the u32 offset */
701                 return offset / 4;
702         }
703
704         /* MSR not in any range */
705         return MSR_INVALID;
706 }
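/*
 * Worked example, derived from the code above: MSR_STAR (0xc0000081) falls
 * into the second range (base 0xc0000000).  Its byte offset within the
 * MSRPM is (0x81 / 4) + 1 * MSRS_RANGE_SIZE = 32 + 2048 = 2080, so the
 * function returns the u32 offset 2080 / 4 = 520.
 */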
707
708 #define MAX_INST_SIZE 15
709
710 static inline void clgi(void)
711 {
712         asm volatile (__ex("clgi"));
713 }
714
715 static inline void stgi(void)
716 {
717         asm volatile (__ex("stgi"));
718 }
719
720 static inline void invlpga(unsigned long addr, u32 asid)
721 {
722         asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
723 }
724
725 static int get_npt_level(struct kvm_vcpu *vcpu)
726 {
727 #ifdef CONFIG_X86_64
728         return PT64_ROOT_4LEVEL;
729 #else
730         return PT32E_ROOT_LEVEL;
731 #endif
732 }
733
734 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
735 {
736         vcpu->arch.efer = efer;
737         if (!npt_enabled && !(efer & EFER_LMA))
738                 efer &= ~EFER_LME;
739
740         to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
741         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
742 }
743
744 static int is_external_interrupt(u32 info)
745 {
746         info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
747         return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
748 }
749
750 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
751 {
752         struct vcpu_svm *svm = to_svm(vcpu);
753         u32 ret = 0;
754
755         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
756                 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
757         return ret;
758 }
759
760 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
761 {
762         struct vcpu_svm *svm = to_svm(vcpu);
763
764         if (mask == 0)
765                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
766         else
767                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
768
769 }
770
771 static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
772 {
773         struct vcpu_svm *svm = to_svm(vcpu);
774
775         if (nrips && svm->vmcb->control.next_rip != 0) {
776                 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
777                 svm->next_rip = svm->vmcb->control.next_rip;
778         }
779
780         if (!svm->next_rip)
781                 return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP);
782
783         if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
784                 printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
785                        __func__, kvm_rip_read(vcpu), svm->next_rip);
786
787         kvm_rip_write(vcpu, svm->next_rip);
788         svm_set_interrupt_shadow(vcpu, 0);
789
790         return EMULATE_DONE;
791 }
792
793 static void svm_queue_exception(struct kvm_vcpu *vcpu)
794 {
795         struct vcpu_svm *svm = to_svm(vcpu);
796         unsigned nr = vcpu->arch.exception.nr;
797         bool has_error_code = vcpu->arch.exception.has_error_code;
798         bool reinject = vcpu->arch.exception.injected;
799         u32 error_code = vcpu->arch.exception.error_code;
800
801         /*
802          * If we are within a nested VM we'd better #VMEXIT and let the guest
803          * handle the exception
804          */
805         if (!reinject &&
806             nested_svm_check_exception(svm, nr, has_error_code, error_code))
807                 return;
808
809         kvm_deliver_exception_payload(&svm->vcpu);
810
811         if (nr == BP_VECTOR && !nrips) {
812                 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
813
814                 /*
815                  * For guest debugging where we have to reinject #BP if some
816                  * INT3 is guest-owned:
817                  * Emulate nRIP by moving RIP forward. Will fail if injection
818                  * raises a fault that is not intercepted. Still better than
819                  * failing in all cases.
820                  */
821                 (void)skip_emulated_instruction(&svm->vcpu);
822                 rip = kvm_rip_read(&svm->vcpu);
823                 svm->int3_rip = rip + svm->vmcb->save.cs.base;
824                 svm->int3_injected = rip - old_rip;
825         }
826
827         svm->vmcb->control.event_inj = nr
828                 | SVM_EVTINJ_VALID
829                 | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
830                 | SVM_EVTINJ_TYPE_EXEPT;
831         svm->vmcb->control.event_inj_err = error_code;
832 }
833
834 static void svm_init_erratum_383(void)
835 {
836         u32 low, high;
837         int err;
838         u64 val;
839
840         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
841                 return;
842
843         /* Use _safe variants to not break nested virtualization */
844         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
845         if (err)
846                 return;
847
848         val |= (1ULL << 47);
849
850         low  = lower_32_bits(val);
851         high = upper_32_bits(val);
852
853         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
854
855         erratum_383_found = true;
856 }
857
858 static void svm_init_osvw(struct kvm_vcpu *vcpu)
859 {
860         /*
861          * Guests should see errata 400 and 415 as fixed (assuming that
862          * HLT and IO instructions are intercepted).
863          */
864         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
865         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
866
867         /*
868          * By increasing VCPU's osvw.length to 3 we are telling the guest that
869          * all osvw.status bits inside that length, including bit 0 (which is
870          * reserved for erratum 298), are valid. However, if host processor's
871          * osvw_len is 0 then osvw_status[0] carries no information. We need to
872          * be conservative here and therefore we tell the guest that erratum 298
873          * is present (because we really don't know).
874          */
875         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
876                 vcpu->arch.osvw.status |= 1;
877 }
878
879 static int has_svm(void)
880 {
881         const char *msg;
882
883         if (!cpu_has_svm(&msg)) {
884                 printk(KERN_INFO "has_svm: %s\n", msg);
885                 return 0;
886         }
887
888         return 1;
889 }
890
891 static void svm_hardware_disable(void)
892 {
893         /* Make sure we clean up behind us */
894         if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
895                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
896
897         cpu_svm_disable();
898
899         amd_pmu_disable_virt();
900 }
901
902 static int svm_hardware_enable(void)
903 {
904
905         struct svm_cpu_data *sd;
906         uint64_t efer;
907         struct desc_struct *gdt;
908         int me = raw_smp_processor_id();
909
910         rdmsrl(MSR_EFER, efer);
911         if (efer & EFER_SVME)
912                 return -EBUSY;
913
914         if (!has_svm()) {
915                 pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
916                 return -EINVAL;
917         }
918         sd = per_cpu(svm_data, me);
919         if (!sd) {
920                 pr_err("%s: svm_data is NULL on %d\n", __func__, me);
921                 return -EINVAL;
922         }
923
924         sd->asid_generation = 1;
925         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
926         sd->next_asid = sd->max_asid + 1;
927         sd->min_asid = max_sev_asid + 1;
928
929         gdt = get_current_gdt_rw();
930         sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
931
932         wrmsrl(MSR_EFER, efer | EFER_SVME);
933
934         wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
935
936         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
937                 wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
938                 __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
939         }
940
941
942         /*
943          * Get OSVW bits.
944          *
945          * Note that it is possible to have a system with mixed processor
946          * revisions and therefore different OSVW bits. If bits are not the same
947          * on different processors then choose the worst case (i.e. if erratum
948          * is present on one processor and not on another then assume that the
949          * erratum is present everywhere).
950          */
951         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
952                 uint64_t len, status = 0;
953                 int err;
954
955                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
956                 if (!err)
957                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
958                                                       &err);
959
960                 if (err)
961                         osvw_status = osvw_len = 0;
962                 else {
963                         if (len < osvw_len)
964                                 osvw_len = len;
965                         osvw_status |= status;
966                         osvw_status &= (1ULL << osvw_len) - 1;
967                 }
968         } else
969                 osvw_status = osvw_len = 0;
970
971         svm_init_erratum_383();
972
973         amd_pmu_enable_virt();
974
975         return 0;
976 }
977
978 static void svm_cpu_uninit(int cpu)
979 {
980         struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());
981
982         if (!sd)
983                 return;
984
985         per_cpu(svm_data, raw_smp_processor_id()) = NULL;
986         kfree(sd->sev_vmcbs);
987         __free_page(sd->save_area);
988         kfree(sd);
989 }
990
991 static int svm_cpu_init(int cpu)
992 {
993         struct svm_cpu_data *sd;
994         int r;
995
996         sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
997         if (!sd)
998                 return -ENOMEM;
999         sd->cpu = cpu;
1000         r = -ENOMEM;
1001         sd->save_area = alloc_page(GFP_KERNEL);
1002         if (!sd->save_area)
1003                 goto err_1;
1004
1005         if (svm_sev_enabled()) {
1006                 r = -ENOMEM;
1007                 sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
1008                                               sizeof(void *),
1009                                               GFP_KERNEL);
1010                 if (!sd->sev_vmcbs)
1011                         goto err_1;
1012         }
1013
1014         per_cpu(svm_data, cpu) = sd;
1015
1016         return 0;
1017
1018 err_1:
1019         kfree(sd);
1020         return r;
1021
1022 }
1023
1024 static bool valid_msr_intercept(u32 index)
1025 {
1026         int i;
1027
1028         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
1029                 if (direct_access_msrs[i].index == index)
1030                         return true;
1031
1032         return false;
1033 }
1034
1035 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
1036 {
1037         u8 bit_write;
1038         unsigned long tmp;
1039         u32 offset;
1040         u32 *msrpm;
1041
1042         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
1043                                       to_svm(vcpu)->msrpm;
1044
1045         offset    = svm_msrpm_offset(msr);
1046         bit_write = 2 * (msr & 0x0f) + 1;
1047         tmp       = msrpm[offset];
1048
1049         BUG_ON(offset == MSR_INVALID);
1050
1051         return !!test_bit(bit_write,  &tmp);
1052 }
1053
1054 static void set_msr_interception(u32 *msrpm, unsigned msr,
1055                                  int read, int write)
1056 {
1057         u8 bit_read, bit_write;
1058         unsigned long tmp;
1059         u32 offset;
1060
1061         /*
1062  * If this warning triggers, extend the direct_access_msrs list at the
1063          * beginning of the file
1064          */
1065         WARN_ON(!valid_msr_intercept(msr));
1066
1067         offset    = svm_msrpm_offset(msr);
1068         bit_read  = 2 * (msr & 0x0f);
1069         bit_write = 2 * (msr & 0x0f) + 1;
1070         tmp       = msrpm[offset];
1071
1072         BUG_ON(offset == MSR_INVALID);
1073
1074         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
1075         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
1076
1077         msrpm[offset] = tmp;
1078 }
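/*
 * Worked example (illustrative): for MSR_IA32_SYSENTER_CS (0x174),
 * svm_msrpm_offset() returns (0x174 / 4) / 4 = 23, and the intercept bits
 * within msrpm[23] are bit_read = 2 * (0x174 & 0xf) = 8 and bit_write = 9.
 * Calling set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1) clears
 * both bits, so guest accesses to that MSR no longer cause a #VMEXIT.
 */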
1079
1080 static void svm_vcpu_init_msrpm(u32 *msrpm)
1081 {
1082         int i;
1083
1084         memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
1085
1086         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
1087                 if (!direct_access_msrs[i].always)
1088                         continue;
1089
1090                 set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
1091         }
1092 }
1093
1094 static void add_msr_offset(u32 offset)
1095 {
1096         int i;
1097
1098         for (i = 0; i < MSRPM_OFFSETS; ++i) {
1099
1100                 /* Offset already in list? */
1101                 if (msrpm_offsets[i] == offset)
1102                         return;
1103
1104                 /* Slot used by another offset? */
1105                 if (msrpm_offsets[i] != MSR_INVALID)
1106                         continue;
1107
1108                 /* Add offset to list */
1109                 msrpm_offsets[i] = offset;
1110
1111                 return;
1112         }
1113
1114         /*
1115  * If this BUG triggers, the msrpm_offsets table has overflowed. Just
1116          * increase MSRPM_OFFSETS in this case.
1117          */
1118         BUG();
1119 }
1120
1121 static void init_msrpm_offsets(void)
1122 {
1123         int i;
1124
1125         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
1126
1127         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
1128                 u32 offset;
1129
1130                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
1131                 BUG_ON(offset == MSR_INVALID);
1132
1133                 add_msr_offset(offset);
1134         }
1135 }
1136
1137 static void svm_enable_lbrv(struct vcpu_svm *svm)
1138 {
1139         u32 *msrpm = svm->msrpm;
1140
1141         svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1142         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
1143         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
1144         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
1145         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
1146 }
1147
1148 static void svm_disable_lbrv(struct vcpu_svm *svm)
1149 {
1150         u32 *msrpm = svm->msrpm;
1151
1152         svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1153         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
1154         set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
1155         set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
1156         set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
1157 }
1158
1159 static void disable_nmi_singlestep(struct vcpu_svm *svm)
1160 {
1161         svm->nmi_singlestep = false;
1162
1163         if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1164                 /* Clear our flags if they were not set by the guest */
1165                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1166                         svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1167                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1168                         svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1169         }
1170 }
1171
1172 /* Note:
1173  * This hash table is used to map a VM_ID to a struct kvm_svm
1174  * when handling AMD IOMMU GALOG notifications to schedule in
1175  * a particular vCPU.
1176  */
1177 #define SVM_VM_DATA_HASH_BITS   8
1178 static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
1179 static u32 next_vm_id = 0;
1180 static bool next_vm_id_wrapped = 0;
1181 static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
1182
1183 /* Note:
1184  * This function is called from the IOMMU driver to notify
1185  * SVM to schedule in a particular vCPU of a particular VM.
1186  */
1187 static int avic_ga_log_notifier(u32 ga_tag)
1188 {
1189         unsigned long flags;
1190         struct kvm_svm *kvm_svm;
1191         struct kvm_vcpu *vcpu = NULL;
1192         u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
1193         u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
1194
1195         pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
1196
1197         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1198         hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
1199                 if (kvm_svm->avic_vm_id != vm_id)
1200                         continue;
1201                 vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
1202                 break;
1203         }
1204         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1205
1206         /* Note:
1207          * At this point, the IOMMU should have already set the pending
1208          * bit in the vAPIC backing page. So, we just need to schedule
1209          * in the vcpu.
1210          */
1211         if (vcpu)
1212                 kvm_vcpu_wake_up(vcpu);
1213
1214         return 0;
1215 }
1216
1217 static __init int sev_hardware_setup(void)
1218 {
1219         struct sev_user_data_status *status;
1220         int rc;
1221
1222         /* Maximum number of encrypted guests supported simultaneously */
1223         max_sev_asid = cpuid_ecx(0x8000001F);
1224
1225         if (!max_sev_asid)
1226                 return 1;
1227
1228         /* Minimum ASID value that should be used for SEV guest */
1229         min_sev_asid = cpuid_edx(0x8000001F);
1230
1231         /* Initialize SEV ASID bitmap */
1232         sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1233         if (!sev_asid_bitmap)
1234                 return 1;
1235
1236         status = kmalloc(sizeof(*status), GFP_KERNEL);
1237         if (!status)
1238                 return 1;
1239
1240         /*
1241          * Check SEV platform status.
1242          *
1243  * PLATFORM_STATUS can be called in any state. If we fail to query the
1244  * platform status, then either the PSP firmware does not support the SEV
1245  * feature or the SEV firmware is dead.
1246          */
1247         rc = sev_platform_status(status, NULL);
1248         if (rc)
1249                 goto err;
1250
1251         pr_info("SEV supported\n");
1252
1253 err:
1254         kfree(status);
1255         return rc;
1256 }
1257
1258 static void grow_ple_window(struct kvm_vcpu *vcpu)
1259 {
1260         struct vcpu_svm *svm = to_svm(vcpu);
1261         struct vmcb_control_area *control = &svm->vmcb->control;
1262         int old = control->pause_filter_count;
1263
1264         control->pause_filter_count = __grow_ple_window(old,
1265                                                         pause_filter_count,
1266                                                         pause_filter_count_grow,
1267                                                         pause_filter_count_max);
1268
1269         if (control->pause_filter_count != old) {
1270                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1271                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1272                                             control->pause_filter_count, old);
1273         }
1274 }
1275
1276 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1277 {
1278         struct vcpu_svm *svm = to_svm(vcpu);
1279         struct vmcb_control_area *control = &svm->vmcb->control;
1280         int old = control->pause_filter_count;
1281
1282         control->pause_filter_count =
1283                                 __shrink_ple_window(old,
1284                                                     pause_filter_count,
1285                                                     pause_filter_count_shrink,
1286                                                     pause_filter_count);
1287         if (control->pause_filter_count != old) {
1288                 mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1289                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1290                                             control->pause_filter_count, old);
1291         }
1292 }
1293
1294 static __init int svm_hardware_setup(void)
1295 {
1296         int cpu;
1297         struct page *iopm_pages;
1298         void *iopm_va;
1299         int r;
1300
1301         iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
1302
1303         if (!iopm_pages)
1304                 return -ENOMEM;
1305
1306         iopm_va = page_address(iopm_pages);
1307         memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
1308         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
1309
1310         init_msrpm_offsets();
1311
1312         if (boot_cpu_has(X86_FEATURE_NX))
1313                 kvm_enable_efer_bits(EFER_NX);
1314
1315         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
1316                 kvm_enable_efer_bits(EFER_FFXSR);
1317
1318         if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1319                 kvm_has_tsc_control = true;
1320                 kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
1321                 kvm_tsc_scaling_ratio_frac_bits = 32;
1322         }
1323
1324         /* Check for pause filtering support */
1325         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
1326                 pause_filter_count = 0;
1327                 pause_filter_thresh = 0;
1328         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
1329                 pause_filter_thresh = 0;
1330         }
1331
1332         if (nested) {
1333                 printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
1334                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
1335         }
1336
1337         if (sev) {
1338                 if (boot_cpu_has(X86_FEATURE_SEV) &&
1339                     IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
1340                         r = sev_hardware_setup();
1341                         if (r)
1342                                 sev = false;
1343                 } else {
1344                         sev = false;
1345                 }
1346         }
1347
1348         for_each_possible_cpu(cpu) {
1349                 r = svm_cpu_init(cpu);
1350                 if (r)
1351                         goto err;
1352         }
1353
1354         if (!boot_cpu_has(X86_FEATURE_NPT))
1355                 npt_enabled = false;
1356
1357         if (npt_enabled && !npt) {
1358                 printk(KERN_INFO "kvm: Nested Paging disabled\n");
1359                 npt_enabled = false;
1360         }
1361
1362         if (npt_enabled) {
1363                 printk(KERN_INFO "kvm: Nested Paging enabled\n");
1364                 kvm_enable_tdp();
1365         } else
1366                 kvm_disable_tdp();
1367
1368         if (nrips) {
1369                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
1370                         nrips = false;
1371         }
1372
1373         if (avic) {
1374                 if (!npt_enabled ||
1375                     !boot_cpu_has(X86_FEATURE_AVIC) ||
1376                     !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
1377                         avic = false;
1378                 } else {
1379                         pr_info("AVIC enabled\n");
1380
1381                         amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
1382                 }
1383         }
1384
1385         if (vls) {
1386                 if (!npt_enabled ||
1387                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
1388                     !IS_ENABLED(CONFIG_X86_64)) {
1389                         vls = false;
1390                 } else {
1391                         pr_info("Virtual VMLOAD VMSAVE supported\n");
1392                 }
1393         }
1394
1395         if (vgif) {
1396                 if (!boot_cpu_has(X86_FEATURE_VGIF))
1397                         vgif = false;
1398                 else
1399                         pr_info("Virtual GIF supported\n");
1400         }
1401
1402         return 0;
1403
1404 err:
1405         __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
1406         iopm_base = 0;
1407         return r;
1408 }
1409
1410 static __exit void svm_hardware_unsetup(void)
1411 {
1412         int cpu;
1413
1414         if (svm_sev_enabled())
1415                 bitmap_free(sev_asid_bitmap);
1416
1417         for_each_possible_cpu(cpu)
1418                 svm_cpu_uninit(cpu);
1419
1420         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
1421         iopm_base = 0;
1422 }
1423
1424 static void init_seg(struct vmcb_seg *seg)
1425 {
1426         seg->selector = 0;
1427         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1428                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1429         seg->limit = 0xffff;
1430         seg->base = 0;
1431 }
1432
1433 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1434 {
1435         seg->selector = 0;
1436         seg->attrib = SVM_SELECTOR_P_MASK | type;
1437         seg->limit = 0xffff;
1438         seg->base = 0;
1439 }
1440
1441 static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
1442 {
1443         struct vcpu_svm *svm = to_svm(vcpu);
1444
1445         if (is_guest_mode(vcpu))
1446                 return svm->nested.hsave->control.tsc_offset;
1447
1448         return vcpu->arch.tsc_offset;
1449 }
1450
1451 static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1452 {
1453         struct vcpu_svm *svm = to_svm(vcpu);
1454         u64 g_tsc_offset = 0;
1455
1456         if (is_guest_mode(vcpu)) {
1457                 /* Write L1's TSC offset.  */
1458                 g_tsc_offset = svm->vmcb->control.tsc_offset -
1459                                svm->nested.hsave->control.tsc_offset;
1460                 svm->nested.hsave->control.tsc_offset = offset;
1461         }
1462
1463         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
1464                                    svm->vmcb->control.tsc_offset - g_tsc_offset,
1465                                    offset);
1466
1467         svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1468
1469         mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1470         return svm->vmcb->control.tsc_offset;
1471 }
1472
1473 static void avic_init_vmcb(struct vcpu_svm *svm)
1474 {
1475         struct vmcb *vmcb = svm->vmcb;
1476         struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
1477         phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
1478         phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
1479         phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
1480
1481         vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
1482         vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
1483         vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
1484         vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
1485         vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
1486 }
1487
1488 static void init_vmcb(struct vcpu_svm *svm)
1489 {
1490         struct vmcb_control_area *control = &svm->vmcb->control;
1491         struct vmcb_save_area *save = &svm->vmcb->save;
1492
1493         svm->vcpu.arch.hflags = 0;
1494
1495         set_cr_intercept(svm, INTERCEPT_CR0_READ);
1496         set_cr_intercept(svm, INTERCEPT_CR3_READ);
1497         set_cr_intercept(svm, INTERCEPT_CR4_READ);
1498         set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1499         set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1500         set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
1501         if (!kvm_vcpu_apicv_active(&svm->vcpu))
1502                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
1503
1504         set_dr_intercepts(svm);
1505
1506         set_exception_intercept(svm, PF_VECTOR);
1507         set_exception_intercept(svm, UD_VECTOR);
1508         set_exception_intercept(svm, MC_VECTOR);
1509         set_exception_intercept(svm, AC_VECTOR);
1510         set_exception_intercept(svm, DB_VECTOR);
1511         /*
1512          * Guest access to VMware backdoor ports could legitimately
1513  * trigger #GP because of the TSS I/O permission bitmap.
1514  * We intercept those #GPs and allow access to the ports anyway,
1515  * as VMware does.
1516          */
1517         if (enable_vmware_backdoor)
1518                 set_exception_intercept(svm, GP_VECTOR);
1519
1520         set_intercept(svm, INTERCEPT_INTR);
1521         set_intercept(svm, INTERCEPT_NMI);
1522         set_intercept(svm, INTERCEPT_SMI);
1523         set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1524         set_intercept(svm, INTERCEPT_RDPMC);
1525         set_intercept(svm, INTERCEPT_CPUID);
1526         set_intercept(svm, INTERCEPT_INVD);
1527         set_intercept(svm, INTERCEPT_INVLPG);
1528         set_intercept(svm, INTERCEPT_INVLPGA);
1529         set_intercept(svm, INTERCEPT_IOIO_PROT);
1530         set_intercept(svm, INTERCEPT_MSR_PROT);
1531         set_intercept(svm, INTERCEPT_TASK_SWITCH);
1532         set_intercept(svm, INTERCEPT_SHUTDOWN);
1533         set_intercept(svm, INTERCEPT_VMRUN);
1534         set_intercept(svm, INTERCEPT_VMMCALL);
1535         set_intercept(svm, INTERCEPT_VMLOAD);
1536         set_intercept(svm, INTERCEPT_VMSAVE);
1537         set_intercept(svm, INTERCEPT_STGI);
1538         set_intercept(svm, INTERCEPT_CLGI);
1539         set_intercept(svm, INTERCEPT_SKINIT);
1540         set_intercept(svm, INTERCEPT_WBINVD);
1541         set_intercept(svm, INTERCEPT_XSETBV);
1542         set_intercept(svm, INTERCEPT_RSM);
1543
1544         if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
1545                 set_intercept(svm, INTERCEPT_MONITOR);
1546                 set_intercept(svm, INTERCEPT_MWAIT);
1547         }
1548
1549         if (!kvm_hlt_in_guest(svm->vcpu.kvm))
1550                 set_intercept(svm, INTERCEPT_HLT);
1551
1552         control->iopm_base_pa = __sme_set(iopm_base);
1553         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1554         control->int_ctl = V_INTR_MASKING_MASK;
1555
1556         init_seg(&save->es);
1557         init_seg(&save->ss);
1558         init_seg(&save->ds);
1559         init_seg(&save->fs);
1560         init_seg(&save->gs);
1561
1562         save->cs.selector = 0xf000;
1563         save->cs.base = 0xffff0000;
1564         /* Executable/Readable Code Segment */
1565         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1566                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1567         save->cs.limit = 0xffff;
1568
1569         save->gdtr.limit = 0xffff;
1570         save->idtr.limit = 0xffff;
1571
1572         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1573         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1574
1575         svm_set_efer(&svm->vcpu, 0);
1576         save->dr6 = 0xffff0ff0;
1577         kvm_set_rflags(&svm->vcpu, 2);
1578         save->rip = 0x0000fff0;
1579         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1580
1581         /*
1582          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1583          * It also updates the guest-visible cr0 value.
1584          */
1585         svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1586         kvm_mmu_reset_context(&svm->vcpu);
1587
1588         save->cr4 = X86_CR4_PAE;
1589         /* rdx = ?? */
1590
1591         if (npt_enabled) {
1592                 /* Setup VMCB for Nested Paging */
1593                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1594                 clr_intercept(svm, INTERCEPT_INVLPG);
1595                 clr_exception_intercept(svm, PF_VECTOR);
1596                 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1597                 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1598                 save->g_pat = svm->vcpu.arch.pat;
1599                 save->cr3 = 0;
1600                 save->cr4 = 0;
1601         }
1602         svm->asid_generation = 0;
1603
1604         svm->nested.vmcb = 0;
1605         svm->vcpu.arch.hflags = 0;
1606
1607         if (pause_filter_count) {
1608                 control->pause_filter_count = pause_filter_count;
1609                 if (pause_filter_thresh)
1610                         control->pause_filter_thresh = pause_filter_thresh;
1611                 set_intercept(svm, INTERCEPT_PAUSE);
1612         } else {
1613                 clr_intercept(svm, INTERCEPT_PAUSE);
1614         }
1615
1616         if (kvm_vcpu_apicv_active(&svm->vcpu))
1617                 avic_init_vmcb(svm);
1618
1619         /*
1620          * If the hardware supports Virtual VMLOAD/VMSAVE, enable it in
1621          * the VMCB and clear the intercepts to avoid #VMEXITs.
1622          */
1623         if (vls) {
1624                 clr_intercept(svm, INTERCEPT_VMLOAD);
1625                 clr_intercept(svm, INTERCEPT_VMSAVE);
1626                 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1627         }
1628
1629         if (vgif) {
1630                 clr_intercept(svm, INTERCEPT_STGI);
1631                 clr_intercept(svm, INTERCEPT_CLGI);
1632                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1633         }
1634
1635         if (sev_guest(svm->vcpu.kvm)) {
1636                 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1637                 clr_exception_intercept(svm, UD_VECTOR);
1638         }
1639
1640         mark_all_dirty(svm->vmcb);
1641
1642         enable_gif(svm);
1643
1644 }
1645
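/*
 * Return a pointer to the given vCPU's slot in the per-VM physical APIC ID
 * table, or NULL if the index is beyond the supported range.
 */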
1646 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1647                                        unsigned int index)
1648 {
1649         u64 *avic_physical_id_table;
1650         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
1651
1652         if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1653                 return NULL;
1654
1655         avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
1656
1657         return &avic_physical_id_table[index];
1658 }
1659
1660 /**
1661  * Note:
1662  * AVIC hardware walks the nested page table to check permissions,
1663  * but does not use the SPA address specified in the leaf page
1664  * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1665  * field of the VMCB. Therefore, we set up the
1666  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1667  */
1668 static int avic_init_access_page(struct kvm_vcpu *vcpu)
1669 {
1670         struct kvm *kvm = vcpu->kvm;
1671         int ret = 0;
1672
1673         mutex_lock(&kvm->slots_lock);
1674         if (kvm->arch.apic_access_page_done)
1675                 goto out;
1676
1677         ret = __x86_set_memory_region(kvm,
1678                                       APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1679                                       APIC_DEFAULT_PHYS_BASE,
1680                                       PAGE_SIZE);
1681         if (ret)
1682                 goto out;
1683
1684         kvm->arch.apic_access_page_done = true;
1685 out:
1686         mutex_unlock(&kvm->slots_lock);
1687         return ret;
1688 }
1689
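/*
 * Use the vCPU's APIC register page as the AVIC backing page and publish
 * its address, with the valid bit set, in the physical APIC ID table entry
 * for this vCPU.
 */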
1690 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1691 {
1692         int ret;
1693         u64 *entry, new_entry;
1694         int id = vcpu->vcpu_id;
1695         struct vcpu_svm *svm = to_svm(vcpu);
1696
1697         ret = avic_init_access_page(vcpu);
1698         if (ret)
1699                 return ret;
1700
1701         if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1702                 return -EINVAL;
1703
1704         if (!svm->vcpu.arch.apic->regs)
1705                 return -EINVAL;
1706
1707         svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1708
1709         /* Set the AVIC backing page address in the physical APIC ID table */
1710         entry = avic_get_physical_id_entry(vcpu, id);
1711         if (!entry)
1712                 return -EINVAL;
1713
1714         new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1715                               AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1716                               AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
1717         WRITE_ONCE(*entry, new_entry);
1718
1719         svm->avic_physical_id_cache = entry;
1720
1721         return 0;
1722 }
1723
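/*
 * Return an SEV ASID to the free bitmap and forget any per-CPU VMCB that
 * was recorded for it.
 */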
1724 static void __sev_asid_free(int asid)
1725 {
1726         struct svm_cpu_data *sd;
1727         int cpu, pos;
1728
1729         pos = asid - 1;
1730         clear_bit(pos, sev_asid_bitmap);
1731
1732         for_each_possible_cpu(cpu) {
1733                 sd = per_cpu(svm_data, cpu);
1734                 sd->sev_vmcbs[pos] = NULL;
1735         }
1736 }
1737
1738 static void sev_asid_free(struct kvm *kvm)
1739 {
1740         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1741
1742         __sev_asid_free(sev->asid);
1743 }
1744
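/*
 * Release the firmware handle of an SEV guest: deactivate the handle, flush
 * caches and issue a data-fabric flush so the ASID can be reused, and then
 * decommission the handle.
 */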
1745 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1746 {
1747         struct sev_data_decommission *decommission;
1748         struct sev_data_deactivate *data;
1749
1750         if (!handle)
1751                 return;
1752
1753         data = kzalloc(sizeof(*data), GFP_KERNEL);
1754         if (!data)
1755                 return;
1756
1757         /* deactivate handle */
1758         data->handle = handle;
1759         sev_guest_deactivate(data, NULL);
1760
1761         wbinvd_on_all_cpus();
1762         sev_guest_df_flush(NULL);
1763         kfree(data);
1764
1765         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1766         if (!decommission)
1767                 return;
1768
1769         /* decommission handle */
1770         decommission->handle = handle;
1771         sev_guest_decommission(decommission, NULL);
1772
1773         kfree(decommission);
1774 }
1775
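/*
 * Pin the user range [uaddr, uaddr + ulen) for SEV, charging the pages
 * against RLIMIT_MEMLOCK. Returns the array of pinned pages and stores
 * their count in *n, or returns NULL on failure.
 */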
1776 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1777                                     unsigned long ulen, unsigned long *n,
1778                                     int write)
1779 {
1780         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1781         unsigned long npages, npinned, size;
1782         unsigned long locked, lock_limit;
1783         struct page **pages;
1784         unsigned long first, last;
1785
1786         if (ulen == 0 || uaddr + ulen < uaddr)
1787                 return NULL;
1788
1789         /* Calculate number of pages. */
1790         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1791         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1792         npages = (last - first + 1);
1793
1794         locked = sev->pages_locked + npages;
1795         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1796         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1797                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1798                 return NULL;
1799         }
1800
1801         /* Avoid using vmalloc for smaller buffers. */
1802         size = npages * sizeof(struct page *);
1803         if (size > PAGE_SIZE)
1804                 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1805                                   PAGE_KERNEL);
1806         else
1807                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
1808
1809         if (!pages)
1810                 return NULL;
1811
1812         /* Pin the user virtual address. */
1813         npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
1814         if (npinned != npages) {
1815                 pr_err("SEV: Failure locking %lu pages.\n", npages);
1816                 goto err;
1817         }
1818
1819         *n = npages;
1820         sev->pages_locked = locked;
1821
1822         return pages;
1823
1824 err:
1825         if (npinned > 0)
1826                 release_pages(pages, npinned);
1827
1828         kvfree(pages);
1829         return NULL;
1830 }
1831
1832 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1833                              unsigned long npages)
1834 {
1835         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1836
1837         release_pages(pages, npages);
1838         kvfree(pages);
1839         sev->pages_locked -= npages;
1840 }
1841
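/*
 * Flush the cache lines backing each page so that dirty data is written to
 * memory with the correct C-bit.
 */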
1842 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1843 {
1844         uint8_t *page_virtual;
1845         unsigned long i;
1846
1847         if (npages == 0 || pages == NULL)
1848                 return;
1849
1850         for (i = 0; i < npages; i++) {
1851                 page_virtual = kmap_atomic(pages[i]);
1852                 clflush_cache_range(page_virtual, PAGE_SIZE);
1853                 kunmap_atomic(page_virtual);
1854         }
1855 }
1856
1857 static void __unregister_enc_region_locked(struct kvm *kvm,
1858                                            struct enc_region *region)
1859 {
1860         /*
1861          * The guest may change the memory encryption attribute from C=0 -> C=1
1862          * or vice versa for this memory range. Let's make sure caches are
1863          * flushed so that guest data is written into memory with the
1864          * correct C-bit.
1865          */
1866         sev_clflush_pages(region->pages, region->npages);
1867
1868         sev_unpin_memory(kvm, region->pages, region->npages);
1869         list_del(&region->list);
1870         kfree(region);
1871 }
1872
1873 static struct kvm *svm_vm_alloc(void)
1874 {
1875         struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
1876                                             GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1877                                             PAGE_KERNEL);
1878         return &kvm_svm->kvm;
1879 }
1880
1881 static void svm_vm_free(struct kvm *kvm)
1882 {
1883         vfree(to_kvm_svm(kvm));
1884 }
1885
1886 static void sev_vm_destroy(struct kvm *kvm)
1887 {
1888         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1889         struct list_head *head = &sev->regions_list;
1890         struct list_head *pos, *q;
1891
1892         if (!sev_guest(kvm))
1893                 return;
1894
1895         mutex_lock(&kvm->lock);
1896
1897         /*
1898          * If userspace was terminated before unregistering the memory regions,
1899          * unpin all the registered memory here.
1900          */
1901         if (!list_empty(head)) {
1902                 list_for_each_safe(pos, q, head) {
1903                         __unregister_enc_region_locked(kvm,
1904                                 list_entry(pos, struct enc_region, list));
1905                 }
1906         }
1907
1908         mutex_unlock(&kvm->lock);
1909
1910         sev_unbind_asid(kvm, sev->handle);
1911         sev_asid_free(kvm);
1912 }
1913
1914 static void avic_vm_destroy(struct kvm *kvm)
1915 {
1916         unsigned long flags;
1917         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1918
1919         if (!avic)
1920                 return;
1921
1922         if (kvm_svm->avic_logical_id_table_page)
1923                 __free_page(kvm_svm->avic_logical_id_table_page);
1924         if (kvm_svm->avic_physical_id_table_page)
1925                 __free_page(kvm_svm->avic_physical_id_table_page);
1926
1927         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1928         hash_del(&kvm_svm->hnode);
1929         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1930 }
1931
1932 static void svm_vm_destroy(struct kvm *kvm)
1933 {
1934         avic_vm_destroy(kvm);
1935         sev_vm_destroy(kvm);
1936 }
1937
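/*
 * Per-VM AVIC setup: allocate the physical and logical APIC ID tables and
 * assign a unique, non-zero VM ID under which the VM is inserted into
 * svm_vm_data_hash.
 */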
1938 static int avic_vm_init(struct kvm *kvm)
1939 {
1940         unsigned long flags;
1941         int err = -ENOMEM;
1942         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1943         struct kvm_svm *k2;
1944         struct page *p_page;
1945         struct page *l_page;
1946         u32 vm_id;
1947
1948         if (!avic)
1949                 return 0;
1950
1951         /* Allocating physical APIC ID table (4KB) */
1952         p_page = alloc_page(GFP_KERNEL_ACCOUNT);
1953         if (!p_page)
1954                 goto free_avic;
1955
1956         kvm_svm->avic_physical_id_table_page = p_page;
1957         clear_page(page_address(p_page));
1958
1959         /* Allocating logical APIC ID table (4KB) */
1960         l_page = alloc_page(GFP_KERNEL_ACCOUNT);
1961         if (!l_page)
1962                 goto free_avic;
1963
1964         kvm_svm->avic_logical_id_table_page = l_page;
1965         clear_page(page_address(l_page));
1966
1967         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1968  again:
1969         vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1970         if (vm_id == 0) { /* id is 1-based, zero is not okay */
1971                 next_vm_id_wrapped = 1;
1972                 goto again;
1973         }
1974         /* Is it still in use? Only possible if wrapped at least once */
1975         if (next_vm_id_wrapped) {
1976                 hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
1977                         if (k2->avic_vm_id == vm_id)
1978                                 goto again;
1979                 }
1980         }
1981         kvm_svm->avic_vm_id = vm_id;
1982         hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
1983         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1984
1985         return 0;
1986
1987 free_avic:
1988         avic_vm_destroy(kvm);
1989         return err;
1990 }
1991
1992 static inline int
1993 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1994 {
1995         int ret = 0;
1996         unsigned long flags;
1997         struct amd_svm_iommu_ir *ir;
1998         struct vcpu_svm *svm = to_svm(vcpu);
1999
2000         if (!kvm_arch_has_assigned_device(vcpu->kvm))
2001                 return 0;
2002
2003         /*
2004          * Here, we go through the per-vcpu ir_list to update all existing
2005          * interrupt remapping table entries targeting this vcpu.
2006          */
2007         spin_lock_irqsave(&svm->ir_list_lock, flags);
2008
2009         if (list_empty(&svm->ir_list))
2010                 goto out;
2011
2012         list_for_each_entry(ir, &svm->ir_list, node) {
2013                 ret = amd_iommu_update_ga(cpu, r, ir->data);
2014                 if (ret)
2015                         break;
2016         }
2017 out:
2018         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
2019         return ret;
2020 }
2021
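/*
 * On vCPU load, record the new host CPU's APIC ID in this vCPU's physical
 * APIC ID table entry, update the IsRunning bit, and refresh the IOMMU
 * interrupt-remapping entries accordingly.
 */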
2022 static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2023 {
2024         u64 entry;
2025         /* ID = 0xff (broadcast), ID > 0xff (reserved) */
2026         int h_physical_id = kvm_cpu_get_apicid(cpu);
2027         struct vcpu_svm *svm = to_svm(vcpu);
2028
2029         if (!kvm_vcpu_apicv_active(vcpu))
2030                 return;
2031
2032         /*
2033          * Since the host physical APIC ID is 8 bits,
2034          * we can support host APIC IDs up to 255.
2035          */
2036         if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
2037                 return;
2038
2039         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2040         WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
2041
2042         entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
2043         entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
2044
2045         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2046         if (svm->avic_is_running)
2047                 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2048
2049         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2050         avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
2051                                         svm->avic_is_running);
2052 }
2053
2054 static void avic_vcpu_put(struct kvm_vcpu *vcpu)
2055 {
2056         u64 entry;
2057         struct vcpu_svm *svm = to_svm(vcpu);
2058
2059         if (!kvm_vcpu_apicv_active(vcpu))
2060                 return;
2061
2062         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2063         if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
2064                 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
2065
2066         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2067         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2068 }
2069
2070 /**
2071  * This function is called during VCPU halt/unhalt.
2072  */
2073 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
2074 {
2075         struct vcpu_svm *svm = to_svm(vcpu);
2076
2077         svm->avic_is_running = is_run;
2078         if (is_run)
2079                 avic_vcpu_load(vcpu, vcpu->cpu);
2080         else
2081                 avic_vcpu_put(vcpu);
2082 }
2083
2084 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2085 {
2086         struct vcpu_svm *svm = to_svm(vcpu);
2087         u32 dummy;
2088         u32 eax = 1;
2089
2090         vcpu->arch.microcode_version = 0x01000065;
2091         svm->spec_ctrl = 0;
2092         svm->virt_spec_ctrl = 0;
2093
2094         if (!init_event) {
2095                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
2096                                            MSR_IA32_APICBASE_ENABLE;
2097                 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
2098                         svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
2099         }
2100         init_vmcb(svm);
2101
2102         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
2103         kvm_rdx_write(vcpu, eax);
2104
2105         if (kvm_vcpu_apicv_active(vcpu) && !init_event)
2106                 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
2107 }
2108
2109 static int avic_init_vcpu(struct vcpu_svm *svm)
2110 {
2111         int ret;
2112
2113         if (!kvm_vcpu_apicv_active(&svm->vcpu))
2114                 return 0;
2115
2116         ret = avic_init_backing_page(&svm->vcpu);
2117         if (ret)
2118                 return ret;
2119
2120         INIT_LIST_HEAD(&svm->ir_list);
2121         spin_lock_init(&svm->ir_list_lock);
2122         svm->dfr_reg = APIC_DFR_FLAT;
2123
2124         return ret;
2125 }
2126
2127 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
2128 {
2129         struct vcpu_svm *svm;
2130         struct page *page;
2131         struct page *msrpm_pages;
2132         struct page *hsave_page;
2133         struct page *nested_msrpm_pages;
2134         int err;
2135
2136         BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
2137                 "struct kvm_vcpu must be at offset 0 for arch usercopy region");
2138
2139         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
2140         if (!svm) {
2141                 err = -ENOMEM;
2142                 goto out;
2143         }
2144
2145         svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
2146                                                      GFP_KERNEL_ACCOUNT);
2147         if (!svm->vcpu.arch.user_fpu) {
2148                 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
2149                 err = -ENOMEM;
2150                 goto free_partial_svm;
2151         }
2152
2153         svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
2154                                                      GFP_KERNEL_ACCOUNT);
2155         if (!svm->vcpu.arch.guest_fpu) {
2156                 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
2157                 err = -ENOMEM;
2158                 goto free_user_fpu;
2159         }
2160
2161         err = kvm_vcpu_init(&svm->vcpu, kvm, id);
2162         if (err)
2163                 goto free_svm;
2164
2165         err = -ENOMEM;
2166         page = alloc_page(GFP_KERNEL_ACCOUNT);
2167         if (!page)
2168                 goto uninit;
2169
2170         msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2171         if (!msrpm_pages)
2172                 goto free_page1;
2173
2174         nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2175         if (!nested_msrpm_pages)
2176                 goto free_page2;
2177
2178         hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
2179         if (!hsave_page)
2180                 goto free_page3;
2181
2182         err = avic_init_vcpu(svm);
2183         if (err)
2184                 goto free_page4;
2185
2186         /* We initialize this flag to true to make sure that the is_running
2187          * bit is set the first time the vcpu is loaded.
2188          */
2189         svm->avic_is_running = true;
2190
2191         svm->nested.hsave = page_address(hsave_page);
2192
2193         svm->msrpm = page_address(msrpm_pages);
2194         svm_vcpu_init_msrpm(svm->msrpm);
2195
2196         svm->nested.msrpm = page_address(nested_msrpm_pages);
2197         svm_vcpu_init_msrpm(svm->nested.msrpm);
2198
2199         svm->vmcb = page_address(page);
2200         clear_page(svm->vmcb);
2201         svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
2202         svm->asid_generation = 0;
2203         init_vmcb(svm);
2204
2205         svm_init_osvw(&svm->vcpu);
2206
2207         return &svm->vcpu;
2208
2209 free_page4:
2210         __free_page(hsave_page);
2211 free_page3:
2212         __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
2213 free_page2:
2214         __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
2215 free_page1:
2216         __free_page(page);
2217 uninit:
2218         kvm_vcpu_uninit(&svm->vcpu);
2219 free_svm:
2220         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2221 free_user_fpu:
2222         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2223 free_partial_svm:
2224         kmem_cache_free(kvm_vcpu_cache, svm);
2225 out:
2226         return ERR_PTR(err);
2227 }
2228
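/* Drop this VMCB from every CPU that has it cached as its current VMCB. */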
2229 static void svm_clear_current_vmcb(struct vmcb *vmcb)
2230 {
2231         int i;
2232
2233         for_each_online_cpu(i)
2234                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2235 }
2236
2237 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2238 {
2239         struct vcpu_svm *svm = to_svm(vcpu);
2240
2241         /*
2242          * The vmcb page can be recycled, causing a false negative in
2243          * svm_vcpu_load(). So, ensure that no logical CPU has this
2244          * vmcb page recorded as its current vmcb.
2245          */
2246         svm_clear_current_vmcb(svm->vmcb);
2247
2248         __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2249         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2250         __free_page(virt_to_page(svm->nested.hsave));
2251         __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2252         kvm_vcpu_uninit(vcpu);
2253         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2254         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2255         kmem_cache_free(kvm_vcpu_cache, svm);
2256 }
2257
2258 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2259 {
2260         struct vcpu_svm *svm = to_svm(vcpu);
2261         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2262         int i;
2263
2264         if (unlikely(cpu != vcpu->cpu)) {
2265                 svm->asid_generation = 0;
2266                 mark_all_dirty(svm->vmcb);
2267         }
2268
2269 #ifdef CONFIG_X86_64
2270         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2271 #endif
2272         savesegment(fs, svm->host.fs);
2273         savesegment(gs, svm->host.gs);
2274         svm->host.ldt = kvm_read_ldt();
2275
2276         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2277                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2278
2279         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2280                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2281                 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2282                         __this_cpu_write(current_tsc_ratio, tsc_ratio);
2283                         wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2284                 }
2285         }
2286         /* This assumes that the kernel never uses MSR_TSC_AUX */
2287         if (static_cpu_has(X86_FEATURE_RDTSCP))
2288                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2289
2290         if (sd->current_vmcb != svm->vmcb) {
2291                 sd->current_vmcb = svm->vmcb;
2292                 indirect_branch_prediction_barrier();
2293         }
2294         avic_vcpu_load(vcpu, cpu);
2295 }
2296
2297 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2298 {
2299         struct vcpu_svm *svm = to_svm(vcpu);
2300         int i;
2301
2302         avic_vcpu_put(vcpu);
2303
2304         ++vcpu->stat.host_state_reload;
2305         kvm_load_ldt(svm->host.ldt);
2306 #ifdef CONFIG_X86_64
2307         loadsegment(fs, svm->host.fs);
2308         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
2309         load_gs_index(svm->host.gs);
2310 #else
2311 #ifdef CONFIG_X86_32_LAZY_GS
2312         loadsegment(gs, svm->host.gs);
2313 #endif
2314 #endif
2315         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2316                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2317 }
2318
2319 static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2320 {
2321         avic_set_running(vcpu, false);
2322 }
2323
2324 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2325 {
2326         avic_set_running(vcpu, true);
2327 }
2328
2329 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2330 {
2331         struct vcpu_svm *svm = to_svm(vcpu);
2332         unsigned long rflags = svm->vmcb->save.rflags;
2333
2334         if (svm->nmi_singlestep) {
2335                 /* Hide our flags if they were not set by the guest */
2336                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2337                         rflags &= ~X86_EFLAGS_TF;
2338                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2339                         rflags &= ~X86_EFLAGS_RF;
2340         }
2341         return rflags;
2342 }
2343
2344 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2345 {
2346         if (to_svm(vcpu)->nmi_singlestep)
2347                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2348
2349         /*
2350          * Any change of EFLAGS.VM is accompanied by a reload of SS
2351          * (caused by either a task switch or an inter-privilege IRET),
2352          * so we do not need to update the CPL here.
2353          */
2354         to_svm(vcpu)->vmcb->save.rflags = rflags;
2355 }
2356
2357 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2358 {
2359         switch (reg) {
2360         case VCPU_EXREG_PDPTR:
2361                 BUG_ON(!npt_enabled);
2362                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
2363                 break;
2364         default:
2365                 BUG();
2366         }
2367 }
2368
2369 static void svm_set_vintr(struct vcpu_svm *svm)
2370 {
2371         set_intercept(svm, INTERCEPT_VINTR);
2372 }
2373
2374 static void svm_clear_vintr(struct vcpu_svm *svm)
2375 {
2376         clr_intercept(svm, INTERCEPT_VINTR);
2377 }
2378
2379 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2380 {
2381         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2382
2383         switch (seg) {
2384         case VCPU_SREG_CS: return &save->cs;
2385         case VCPU_SREG_DS: return &save->ds;
2386         case VCPU_SREG_ES: return &save->es;
2387         case VCPU_SREG_FS: return &save->fs;
2388         case VCPU_SREG_GS: return &save->gs;
2389         case VCPU_SREG_SS: return &save->ss;
2390         case VCPU_SREG_TR: return &save->tr;
2391         case VCPU_SREG_LDTR: return &save->ldtr;
2392         }
2393         BUG();
2394         return NULL;
2395 }
2396
2397 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2398 {
2399         struct vmcb_seg *s = svm_seg(vcpu, seg);
2400
2401         return s->base;
2402 }
2403
2404 static void svm_get_segment(struct kvm_vcpu *vcpu,
2405                             struct kvm_segment *var, int seg)
2406 {
2407         struct vmcb_seg *s = svm_seg(vcpu, seg);
2408
2409         var->base = s->base;
2410         var->limit = s->limit;
2411         var->selector = s->selector;
2412         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2413         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2414         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2415         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2416         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2417         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2418         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
2419
2420         /*
2421          * AMD CPUs circa 2014 track the G bit for all segments except CS.
2422          * However, the SVM spec states that the G bit is not observed by the
2423          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2424          * So let's synthesize a legal G bit for all segments; this helps
2425          * when running KVM nested. It also helps cross-vendor migration, because
2426          * Intel's vmentry has a check on the 'G' bit.
2427          */
2428         var->g = s->limit > 0xfffff;
2429
2430         /*
2431          * AMD's VMCB does not have an explicit unusable field, so emulate it
2432          * for cross-vendor migration purposes by deriving it from "not present".
2433          */
2434         var->unusable = !var->present;
2435
2436         switch (seg) {
2437         case VCPU_SREG_TR:
2438                 /*
2439                  * Work around a bug where the busy flag in the tr selector
2440                  * isn't exposed
2441                  */
2442                 var->type |= 0x2;
2443                 break;
2444         case VCPU_SREG_DS:
2445         case VCPU_SREG_ES:
2446         case VCPU_SREG_FS:
2447         case VCPU_SREG_GS:
2448                 /*
2449                  * The accessed bit must always be set in the segment
2450                  * descriptor cache: although it can be cleared in the
2451                  * in-memory descriptor, the cached bit always remains 1. Since
2452                  * Intel has a check on this, set it here to support
2453                  * cross-vendor migration.
2454                  */
2455                 if (!var->unusable)
2456                         var->type |= 0x1;
2457                 break;
2458         case VCPU_SREG_SS:
2459                 /*
2460                  * On AMD CPUs sometimes the DB bit in the segment
2461                  * descriptor is left as 1, although the whole segment has
2462                  * been made unusable. Clear it here to pass an Intel VMX
2463                  * entry check when cross vendor migrating.
2464                  */
2465                 if (var->unusable)
2466                         var->db = 0;
2467                 /* This is symmetric with svm_set_segment() */
2468                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
2469                 break;
2470         }
2471 }
2472
2473 static int svm_get_cpl(struct kvm_vcpu *vcpu)
2474 {
2475         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2476
2477         return save->cpl;
2478 }
2479
2480 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2481 {
2482         struct vcpu_svm *svm = to_svm(vcpu);
2483
2484         dt->size = svm->vmcb->save.idtr.limit;
2485         dt->address = svm->vmcb->save.idtr.base;
2486 }
2487
2488 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2489 {
2490         struct vcpu_svm *svm = to_svm(vcpu);
2491
2492         svm->vmcb->save.idtr.limit = dt->size;
2493         svm->vmcb->save.idtr.base = dt->address;
2494         mark_dirty(svm->vmcb, VMCB_DT);
2495 }
2496
2497 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2498 {
2499         struct vcpu_svm *svm = to_svm(vcpu);
2500
2501         dt->size = svm->vmcb->save.gdtr.limit;
2502         dt->address = svm->vmcb->save.gdtr.base;
2503 }
2504
2505 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2506 {
2507         struct vcpu_svm *svm = to_svm(vcpu);
2508
2509         svm->vmcb->save.gdtr.limit = dt->size;
2510         svm->vmcb->save.gdtr.base = dt->address;
2511         mark_dirty(svm->vmcb, VMCB_DT);
2512 }
2513
2514 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2515 {
2516 }
2517
2518 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2519 {
2520 }
2521
2522 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
2523 {
2524 }
2525
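/*
 * Copy the selectively-intercepted CR0 bits from the guest-visible CR0 into
 * the hardware CR0 and intercept CR0 accesses only while the two values
 * still differ.
 */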
2526 static void update_cr0_intercept(struct vcpu_svm *svm)
2527 {
2528         ulong gcr0 = svm->vcpu.arch.cr0;
2529         u64 *hcr0 = &svm->vmcb->save.cr0;
2530
2531         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2532                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
2533
2534         mark_dirty(svm->vmcb, VMCB_CR);
2535
2536         if (gcr0 == *hcr0) {
2537                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2538                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2539         } else {
2540                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2541                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2542         }
2543 }
2544
2545 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2546 {
2547         struct vcpu_svm *svm = to_svm(vcpu);
2548
2549 #ifdef CONFIG_X86_64
2550         if (vcpu->arch.efer & EFER_LME) {
2551                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
2552                         vcpu->arch.efer |= EFER_LMA;
2553                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
2554                 }
2555
2556                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
2557                         vcpu->arch.efer &= ~EFER_LMA;
2558                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
2559                 }
2560         }
2561 #endif
2562         vcpu->arch.cr0 = cr0;
2563
2564         if (!npt_enabled)
2565                 cr0 |= X86_CR0_PG | X86_CR0_WP;
2566
2567         /*
2568          * Re-enable caching here because the QEMU BIOS
2569          * does not do it; otherwise this results in some delay at
2570          * reboot.
2571          */
2572         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2573                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
2574         svm->vmcb->save.cr0 = cr0;
2575         mark_dirty(svm->vmcb, VMCB_CR);
2576         update_cr0_intercept(svm);
2577 }
2578
2579 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2580 {
2581         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
2582         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2583
2584         if (cr4 & X86_CR4_VMXE)
2585                 return 1;
2586
2587         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
2588                 svm_flush_tlb(vcpu, true);
2589
2590         vcpu->arch.cr4 = cr4;
2591         if (!npt_enabled)
2592                 cr4 |= X86_CR4_PAE;
2593         cr4 |= host_cr4_mce;
2594         to_svm(vcpu)->vmcb->save.cr4 = cr4;
2595         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
2596         return 0;
2597 }
2598
2599 static void svm_set_segment(struct kvm_vcpu *vcpu,
2600                             struct kvm_segment *var, int seg)
2601 {
2602         struct vcpu_svm *svm = to_svm(vcpu);
2603         struct vmcb_seg *s = svm_seg(vcpu, seg);
2604
2605         s->base = var->base;
2606         s->limit = var->limit;
2607         s->selector = var->selector;
2608         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2609         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2610         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2611         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2612         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2613         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2614         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2615         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
2616
2617         /*
2618          * This is always accurate, except if SYSRET returned to a segment
2619          * with SS.DPL != 3.  Intel does not have this quirk, and always
2620          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2621          * would entail passing the CPL to userspace and back.
2622          */
2623         if (seg == VCPU_SREG_SS)
2624                 /* This is symmetric with svm_get_segment() */
2625                 svm->vmcb->save.cpl = (var->dpl & 3);
2626
2627         mark_dirty(svm->vmcb, VMCB_SEG);
2628 }
2629
2630 static void update_bp_intercept(struct kvm_vcpu *vcpu)
2631 {
2632         struct vcpu_svm *svm = to_svm(vcpu);
2633
2634         clr_exception_intercept(svm, BP_VECTOR);
2635
2636         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
2637                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2638                         set_exception_intercept(svm, BP_VECTOR);
2639         } else
2640                 vcpu->guest_debug = 0;
2641 }
2642
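/*
 * Hand out the next ASID from the per-CPU pool. When the pool is exhausted,
 * start a new generation and request a full TLB flush so recycled ASIDs
 * cannot match stale translations.
 */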
2643 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
2644 {
2645         if (sd->next_asid > sd->max_asid) {
2646                 ++sd->asid_generation;
2647                 sd->next_asid = sd->min_asid;
2648                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2649         }
2650
2651         svm->asid_generation = sd->asid_generation;
2652         svm->vmcb->control.asid = sd->next_asid++;
2653
2654         mark_dirty(svm->vmcb, VMCB_ASID);
2655 }
2656
2657 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2658 {
2659         return to_svm(vcpu)->vmcb->save.dr6;
2660 }
2661
2662 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2663 {
2664         struct vcpu_svm *svm = to_svm(vcpu);
2665
2666         svm->vmcb->save.dr6 = value;
2667         mark_dirty(svm->vmcb, VMCB_DR);
2668 }
2669
2670 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2671 {
2672         struct vcpu_svm *svm = to_svm(vcpu);
2673
2674         get_debugreg(vcpu->arch.db[0], 0);
2675         get_debugreg(vcpu->arch.db[1], 1);
2676         get_debugreg(vcpu->arch.db[2], 2);
2677         get_debugreg(vcpu->arch.db[3], 3);
2678         vcpu->arch.dr6 = svm_get_dr6(vcpu);
2679         vcpu->arch.dr7 = svm->vmcb->save.dr7;
2680
2681         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2682         set_dr_intercepts(svm);
2683 }
2684
2685 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2686 {
2687         struct vcpu_svm *svm = to_svm(vcpu);
2688
2689         svm->vmcb->save.dr7 = value;
2690         mark_dirty(svm->vmcb, VMCB_DR);
2691 }
2692
2693 static int pf_interception(struct vcpu_svm *svm)
2694 {
2695         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2696         u64 error_code = svm->vmcb->control.exit_info_1;
2697
2698         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2699                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2700                         svm->vmcb->control.insn_bytes : NULL,
2701                         svm->vmcb->control.insn_len);
2702 }
2703
2704 static int npf_interception(struct vcpu_svm *svm)
2705 {
2706         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2707         u64 error_code = svm->vmcb->control.exit_info_1;
2708
2709         trace_kvm_page_fault(fault_address, error_code);
2710         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2711                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2712                         svm->vmcb->control.insn_bytes : NULL,
2713                         svm->vmcb->control.insn_len);
2714 }
2715
2716 static int db_interception(struct vcpu_svm *svm)
2717 {
2718         struct kvm_run *kvm_run = svm->vcpu.run;
2719         struct kvm_vcpu *vcpu = &svm->vcpu;
2720
2721         if (!(svm->vcpu.guest_debug &
2722               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2723                 !svm->nmi_singlestep) {
2724                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2725                 return 1;
2726         }
2727
2728         if (svm->nmi_singlestep) {
2729                 disable_nmi_singlestep(svm);
2730                 /* Make sure we check for pending NMIs upon entry */
2731                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2732         }
2733
2734         if (svm->vcpu.guest_debug &
2735             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2736                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2737                 kvm_run->debug.arch.pc =
2738                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2739                 kvm_run->debug.arch.exception = DB_VECTOR;
2740                 return 0;
2741         }
2742
2743         return 1;
2744 }
2745
2746 static int bp_interception(struct vcpu_svm *svm)
2747 {
2748         struct kvm_run *kvm_run = svm->vcpu.run;
2749
2750         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2751         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2752         kvm_run->debug.arch.exception = BP_VECTOR;
2753         return 0;
2754 }
2755
2756 static int ud_interception(struct vcpu_svm *svm)
2757 {
2758         return handle_ud(&svm->vcpu);
2759 }
2760
2761 static int ac_interception(struct vcpu_svm *svm)
2762 {
2763         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2764         return 1;
2765 }
2766
2767 static int gp_interception(struct vcpu_svm *svm)
2768 {
2769         struct kvm_vcpu *vcpu = &svm->vcpu;
2770         u32 error_code = svm->vmcb->control.exit_info_1;
2771         int er;
2772
2773         WARN_ON_ONCE(!enable_vmware_backdoor);
2774
2775         er = kvm_emulate_instruction(vcpu,
2776                 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
2777         if (er == EMULATE_USER_EXIT)
2778                 return 0;
2779         else if (er != EMULATE_DONE)
2780                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2781         return 1;
2782 }
2783
2784 static bool is_erratum_383(void)
2785 {
2786         int err, i;
2787         u64 value;
2788
2789         if (!erratum_383_found)
2790                 return false;
2791
2792         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2793         if (err)
2794                 return false;
2795
2796         /* Bit 62 may or may not be set for this mce */
2797         value &= ~(1ULL << 62);
2798
2799         if (value != 0xb600000000010015ULL)
2800                 return false;
2801
2802         /* Clear MCi_STATUS registers */
2803         for (i = 0; i < 6; ++i)
2804                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2805
2806         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2807         if (!err) {
2808                 u32 low, high;
2809
2810                 value &= ~(1ULL << 2);
2811                 low    = lower_32_bits(value);
2812                 high   = upper_32_bits(value);
2813
2814                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2815         }
2816
2817         /* Flush tlb to evict multi-match entries */
2818         __flush_tlb_all();
2819
2820         return true;
2821 }
2822
2823 static void svm_handle_mce(struct vcpu_svm *svm)
2824 {
2825         if (is_erratum_383()) {
2826                 /*
2827                  * Erratum 383 triggered. Guest state is corrupt so kill the
2828                  * guest.
2829                  */
2830                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2831
2832                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
2833
2834                 return;
2835         }
2836
2837         /*
2838          * On an #MC intercept the MCE handler is not called automatically in
2839          * the host. So do it by hand here.
2840          */
2841         asm volatile (
2842                 "int $0x12\n");
2843         /* not sure if we ever come back to this point */
2844
2845         return;
2846 }
2847
2848 static int mc_interception(struct vcpu_svm *svm)
2849 {
2850         return 1;
2851 }
2852
2853 static int shutdown_interception(struct vcpu_svm *svm)
2854 {
2855         struct kvm_run *kvm_run = svm->vcpu.run;
2856
2857         /*
2858          * VMCB is undefined after a SHUTDOWN intercept
2859          * so reinitialize it.
2860          */
2861         clear_page(svm->vmcb);
2862         init_vmcb(svm);
2863
2864         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2865         return 0;
2866 }
2867
2868 static int io_interception(struct vcpu_svm *svm)
2869 {
2870         struct kvm_vcpu *vcpu = &svm->vcpu;
2871         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2872         int size, in, string;
2873         unsigned port;
2874
2875         ++svm->vcpu.stat.io_exits;
2876         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2877         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2878         if (string)
2879                 return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
2880
2881         port = io_info >> 16;
2882         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2883         svm->next_rip = svm->vmcb->control.exit_info_2;
2884
2885         return kvm_fast_pio(&svm->vcpu, size, port, in);
2886 }
2887
2888 static int nmi_interception(struct vcpu_svm *svm)
2889 {
2890         return 1;
2891 }
2892
2893 static int intr_interception(struct vcpu_svm *svm)
2894 {
2895         ++svm->vcpu.stat.irq_exits;
2896         return 1;
2897 }
2898
2899 static int nop_on_interception(struct vcpu_svm *svm)
2900 {
2901         return 1;
2902 }
2903
2904 static int halt_interception(struct vcpu_svm *svm)
2905 {
2906         return kvm_emulate_halt(&svm->vcpu);
2907 }
2908
2909 static int vmmcall_interception(struct vcpu_svm *svm)
2910 {
2911         return kvm_emulate_hypercall(&svm->vcpu);
2912 }
2913
2914 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2915 {
2916         struct vcpu_svm *svm = to_svm(vcpu);
2917
2918         return svm->nested.nested_cr3;
2919 }
2920
2921 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2922 {
2923         struct vcpu_svm *svm = to_svm(vcpu);
2924         u64 cr3 = svm->nested.nested_cr3;
2925         u64 pdpte;
2926         int ret;
2927
2928         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
2929                                        offset_in_page(cr3) + index * 8, 8);
2930         if (ret)
2931                 return 0;
2932         return pdpte;
2933 }
2934
2935 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2936                                    unsigned long root)
2937 {
2938         struct vcpu_svm *svm = to_svm(vcpu);
2939
2940         svm->vmcb->control.nested_cr3 = __sme_set(root);
2941         mark_dirty(svm->vmcb, VMCB_NPT);
2942 }
2943
2944 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2945                                        struct x86_exception *fault)
2946 {
2947         struct vcpu_svm *svm = to_svm(vcpu);
2948
2949         if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2950                 /*
2951                  * TODO: track the cause of the nested page fault, and
2952                  * correctly fill in the high bits of exit_info_1.
2953                  */
2954                 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2955                 svm->vmcb->control.exit_code_hi = 0;
2956                 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2957                 svm->vmcb->control.exit_info_2 = fault->address;
2958         }
2959
2960         svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2961         svm->vmcb->control.exit_info_1 |= fault->error_code;
2962
2963         /*
2964          * The present bit is always zero for page structure faults on real
2965          * hardware.
2966          */
2967         if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2968                 svm->vmcb->control.exit_info_1 &= ~1;
2969
2970         nested_svm_vmexit(svm);
2971 }
2972
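/*
 * Switch the vCPU to the nested NPT MMU: L1's nested CR3 supplies the page
 * table root and PDPTRs, while walk_mmu is pointed at the nested MMU for
 * L2 translations.
 */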
2973 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
2974 {
2975         WARN_ON(mmu_is_nested(vcpu));
2976
2977         vcpu->arch.mmu = &vcpu->arch.guest_mmu;
2978         kvm_init_shadow_mmu(vcpu);
2979         vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
2980         vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
2981         vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
2982         vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
2983         vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
2984         reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
2985         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
2986 }
2987
2988 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2989 {
2990         vcpu->arch.mmu = &vcpu->arch.root_mmu;
2991         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
2992 }
2993
2994 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2995 {
2996         if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2997             !is_paging(&svm->vcpu)) {
2998                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2999                 return 1;
3000         }
3001
3002         if (svm->vmcb->save.cpl) {
3003                 kvm_inject_gp(&svm->vcpu, 0);
3004                 return 1;
3005         }
3006
3007         return 0;
3008 }
3009
3010 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
3011                                       bool has_error_code, u32 error_code)
3012 {
3013         int vmexit;
3014
3015         if (!is_guest_mode(&svm->vcpu))
3016                 return 0;
3017
3018         vmexit = nested_svm_intercept(svm);
3019         if (vmexit != NESTED_EXIT_DONE)
3020                 return 0;
3021
3022         svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
3023         svm->vmcb->control.exit_code_hi = 0;
3024         svm->vmcb->control.exit_info_1 = error_code;
3025
3026         /*
3027          * EXITINFO2 is undefined for all exception intercepts other
3028          * than #PF.
3029          */
3030         if (svm->vcpu.arch.exception.nested_apf)
3031                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
3032         else if (svm->vcpu.arch.exception.has_payload)
3033                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
3034         else
3035                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
3036
3037         svm->nested.exit_required = true;
3038         return vmexit;
3039 }
3040
3041 /* This function returns true if it is safe to enable the irq window */
3042 static inline bool nested_svm_intr(struct vcpu_svm *svm)
3043 {
3044         if (!is_guest_mode(&svm->vcpu))
3045                 return true;
3046
3047         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3048                 return true;
3049
3050         if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
3051                 return false;
3052
3053         /*
3054          * If a vmexit was already requested (by an intercepted exception,
3055          * for instance), do not overwrite it with an "external interrupt"
3056          * vmexit.
3057          */
3058         if (svm->nested.exit_required)
3059                 return false;
3060
3061         svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
3062         svm->vmcb->control.exit_info_1 = 0;
3063         svm->vmcb->control.exit_info_2 = 0;
3064
3065         if (svm->nested.intercept & 1ULL) {
3066                 /*
3067                  * The #vmexit can't be emulated here directly because this
3068                  * code path runs with irqs and preemption disabled. A
3069                  * #vmexit emulation might sleep. Only signal the request for
3070                  * the #vmexit here.
3071                  */
3072                 svm->nested.exit_required = true;
3073                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
3074                 return false;
3075         }
3076
3077         return true;
3078 }
3079
3080 /* This function returns true if it is safe to enable the nmi window */
3081 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
3082 {
3083         if (!is_guest_mode(&svm->vcpu))
3084                 return true;
3085
3086         if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
3087                 return true;
3088
3089         svm->vmcb->control.exit_code = SVM_EXIT_NMI;
3090         svm->nested.exit_required = true;
3091
3092         return false;
3093 }
3094
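/*
 * Consult L1's I/O permission bitmap for the intercepted port access. The
 * relevant bits may straddle a byte boundary, so up to two bytes are read
 * from guest memory.
 */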
3095 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
3096 {
3097         unsigned port, size, iopm_len;
3098         u16 val, mask;
3099         u8 start_bit;
3100         u64 gpa;
3101
3102         if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
3103                 return NESTED_EXIT_HOST;
3104
3105         port = svm->vmcb->control.exit_info_1 >> 16;
3106         size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
3107                 SVM_IOIO_SIZE_SHIFT;
3108         gpa  = svm->nested.vmcb_iopm + (port / 8);
3109         start_bit = port % 8;
3110         iopm_len = (start_bit + size > 8) ? 2 : 1;
3111         mask = (0xf >> (4 - size)) << start_bit;
3112         val = 0;
3113
3114         if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
3115                 return NESTED_EXIT_DONE;
3116
3117         return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3118 }
3119
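/*
 * Consult L1's MSR permission bitmap to decide whether this MSR access must
 * be reflected to the nested hypervisor; each MSR is covered by one read
 * bit and one write bit.
 */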
3120 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
3121 {
3122         u32 offset, msr, value;
3123         int write, mask;
3124
3125         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3126                 return NESTED_EXIT_HOST;
3127
3128         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3129         offset = svm_msrpm_offset(msr);
3130         write  = svm->vmcb->control.exit_info_1 & 1;
3131         mask   = 1 << ((2 * (msr & 0xf)) + write);
3132
3133         if (offset == MSR_INVALID)
3134                 return NESTED_EXIT_DONE;
3135
3136         /* Offset is in 32-bit units but we need it in 8-bit units */
3137         offset *= 4;
3138
3139         if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
3140                 return NESTED_EXIT_DONE;
3141
3142         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3143 }
3144
3145 /* DB exceptions for our internal use must not cause vmexit */
3146 static int nested_svm_intercept_db(struct vcpu_svm *svm)
3147 {
3148         unsigned long dr6;
3149
3150         /* if we're not singlestepping, it's not ours */
3151         if (!svm->nmi_singlestep)
3152                 return NESTED_EXIT_DONE;
3153
3154         /* if it's not a singlestep exception, it's not ours */
3155         if (kvm_get_dr(&svm->vcpu, 6, &dr6))
3156                 return NESTED_EXIT_DONE;
3157         if (!(dr6 & DR6_BS))
3158                 return NESTED_EXIT_DONE;
3159
3160         /* if the guest is singlestepping, it should get the vmexit */
3161         if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
3162                 disable_nmi_singlestep(svm);
3163                 return NESTED_EXIT_DONE;
3164         }
3165
3166         /* it's ours, the nested hypervisor must not see this one */
3167         return NESTED_EXIT_HOST;
3168 }
3169
3170 static int nested_svm_exit_special(struct vcpu_svm *svm)
3171 {
3172         u32 exit_code = svm->vmcb->control.exit_code;
3173
3174         switch (exit_code) {
3175         case SVM_EXIT_INTR:
3176         case SVM_EXIT_NMI:
3177         case SVM_EXIT_EXCP_BASE + MC_VECTOR:
3178                 return NESTED_EXIT_HOST;
3179         case SVM_EXIT_NPF:
3180                 /* For now we are always handling NPFs when using them */
3181                 if (npt_enabled)
3182                         return NESTED_EXIT_HOST;
3183                 break;
3184         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
3185                 /* When we're shadowing, trap PFs, but not async PF */
3186                 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
3187                         return NESTED_EXIT_HOST;
3188                 break;
3189         default:
3190                 break;
3191         }
3192
3193         return NESTED_EXIT_CONTINUE;
3194 }
3195
3196 /*
3197  * Returns NESTED_EXIT_DONE if this #vmexit is intercepted by L1.
3198  */
3199 static int nested_svm_intercept(struct vcpu_svm *svm)
3200 {
3201         u32 exit_code = svm->vmcb->control.exit_code;
3202         int vmexit = NESTED_EXIT_HOST;
3203
3204         switch (exit_code) {
3205         case SVM_EXIT_MSR:
3206                 vmexit = nested_svm_exit_handled_msr(svm);
3207                 break;
3208         case SVM_EXIT_IOIO:
3209                 vmexit = nested_svm_intercept_ioio(svm);
3210                 break;
3211         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
3212                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
3213                 if (svm->nested.intercept_cr & bit)
3214                         vmexit = NESTED_EXIT_DONE;
3215                 break;
3216         }
3217         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
3218                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
3219                 if (svm->nested.intercept_dr & bit)
3220                         vmexit = NESTED_EXIT_DONE;
3221                 break;
3222         }
3223         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
3224                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
3225                 if (svm->nested.intercept_exceptions & excp_bits) {
3226                         if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
3227                                 vmexit = nested_svm_intercept_db(svm);
3228                         else
3229                                 vmexit = NESTED_EXIT_DONE;
3230                 }
3231                 /* async page faults always cause a vmexit */
3232                 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
3233                          svm->vcpu.arch.exception.nested_apf != 0)
3234                         vmexit = NESTED_EXIT_DONE;
3235                 break;
3236         }
3237         case SVM_EXIT_ERR: {
3238                 vmexit = NESTED_EXIT_DONE;
3239                 break;
3240         }
3241         default: {
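                     /*
                      * The generic intercept bits map 1:1 onto exit codes
                      * starting at SVM_EXIT_INTR.
                      */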
3242                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
3243                 if (svm->nested.intercept & exit_bits)
3244                         vmexit = NESTED_EXIT_DONE;
3245         }
3246         }
3247
3248         return vmexit;
3249 }
3250
3251 static int nested_svm_exit_handled(struct vcpu_svm *svm)
3252 {
3253         int vmexit;
3254
3255         vmexit = nested_svm_intercept(svm);
3256
3257         if (vmexit == NESTED_EXIT_DONE)
3258                 nested_svm_vmexit(svm);
3259
3260         return vmexit;
3261 }
3262
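     /*
      * Copy only the VMCB control area; the save area is transferred field
      * by field by the VMRUN/#VMEXIT emulation code.
      */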
3263 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3264 {
3265         struct vmcb_control_area *dst  = &dst_vmcb->control;
3266         struct vmcb_control_area *from = &from_vmcb->control;
3267
3268         dst->intercept_cr         = from->intercept_cr;
3269         dst->intercept_dr         = from->intercept_dr;
3270         dst->intercept_exceptions = from->intercept_exceptions;
3271         dst->intercept            = from->intercept;
3272         dst->iopm_base_pa         = from->iopm_base_pa;
3273         dst->msrpm_base_pa        = from->msrpm_base_pa;
3274         dst->tsc_offset           = from->tsc_offset;
3275         dst->asid                 = from->asid;
3276         dst->tlb_ctl              = from->tlb_ctl;
3277         dst->int_ctl              = from->int_ctl;
3278         dst->int_vector           = from->int_vector;
3279         dst->int_state            = from->int_state;
3280         dst->exit_code            = from->exit_code;
3281         dst->exit_code_hi         = from->exit_code_hi;
3282         dst->exit_info_1          = from->exit_info_1;
3283         dst->exit_info_2          = from->exit_info_2;
3284         dst->exit_int_info        = from->exit_int_info;
3285         dst->exit_int_info_err    = from->exit_int_info_err;
3286         dst->nested_ctl           = from->nested_ctl;
3287         dst->event_inj            = from->event_inj;
3288         dst->event_inj_err        = from->event_inj_err;
3289         dst->nested_cr3           = from->nested_cr3;
3290         dst->virt_ext             = from->virt_ext;
3291         dst->pause_filter_count   = from->pause_filter_count;
3292         dst->pause_filter_thresh  = from->pause_filter_thresh;
3293 }
3294
3295 static int nested_svm_vmexit(struct vcpu_svm *svm)
3296 {
3297         int rc;
3298         struct vmcb *nested_vmcb;
3299         struct vmcb *hsave = svm->nested.hsave;
3300         struct vmcb *vmcb = svm->vmcb;
3301         struct kvm_host_map map;
3302
3303         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3304                                        vmcb->control.exit_info_1,
3305                                        vmcb->control.exit_info_2,
3306                                        vmcb->control.exit_int_info,
3307                                        vmcb->control.exit_int_info_err,
3308                                        KVM_ISA_SVM);
3309
3310         rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
3311         if (rc) {
3312                 if (rc == -EINVAL)
3313                         kvm_inject_gp(&svm->vcpu, 0);
3314                 return 1;
3315         }
3316
3317         nested_vmcb = map.hva;
3318
3319         /* Exit Guest-Mode */
3320         leave_guest_mode(&svm->vcpu);
3321         svm->nested.vmcb = 0;
3322
3323         /* Give the current vmcb to the guest */
3324         disable_gif(svm);
3325
3326         nested_vmcb->save.es     = vmcb->save.es;
3327         nested_vmcb->save.cs     = vmcb->save.cs;
3328         nested_vmcb->save.ss     = vmcb->save.ss;
3329         nested_vmcb->save.ds     = vmcb->save.ds;
3330         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
3331         nested_vmcb->save.idtr   = vmcb->save.idtr;
3332         nested_vmcb->save.efer   = svm->vcpu.arch.efer;
3333         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
3334         nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
3335         nested_vmcb->save.cr2    = vmcb->save.cr2;
3336         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
3337         nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
3338         nested_vmcb->save.rip    = vmcb->save.rip;
3339         nested_vmcb->save.rsp    = vmcb->save.rsp;
3340         nested_vmcb->save.rax    = vmcb->save.rax;
3341         nested_vmcb->save.dr7    = vmcb->save.dr7;
3342         nested_vmcb->save.dr6    = vmcb->save.dr6;
3343         nested_vmcb->save.cpl    = vmcb->save.cpl;
3344
3345         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
3346         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
3347         nested_vmcb->control.int_state         = vmcb->control.int_state;
3348         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
3349         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
3350         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
3351         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
3352         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
3353         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
3354
3355         if (svm->nrips_enabled)
3356                 nested_vmcb->control.next_rip  = vmcb->control.next_rip;
3357
3358         /*
3359          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3360          * to make sure that we do not lose injected events. So check event_inj
3361          * here and copy it to exit_int_info if it is valid.
3362          * exit_int_info and event_inj can't both be valid because the case
3363          * below only happens on a VMRUN instruction intercept which has
3364          * no valid exit_int_info set.
3365          */
3366         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3367                 struct vmcb_control_area *nc = &nested_vmcb->control;
3368
3369                 nc->exit_int_info     = vmcb->control.event_inj;
3370                 nc->exit_int_info_err = vmcb->control.event_inj_err;
3371         }
3372
3373         nested_vmcb->control.tlb_ctl           = 0;
3374         nested_vmcb->control.event_inj         = 0;
3375         nested_vmcb->control.event_inj_err     = 0;
3376
3377         nested_vmcb->control.pause_filter_count =
3378                 svm->vmcb->control.pause_filter_count;
3379         nested_vmcb->control.pause_filter_thresh =
3380                 svm->vmcb->control.pause_filter_thresh;
3381
3382         /* We always set V_INTR_MASKING and remember the old value in hflags */
3383         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3384                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3385
3386         /* Restore the original control entries */
3387         copy_vmcb_control_area(vmcb, hsave);
3388
3389         svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
3390         kvm_clear_exception_queue(&svm->vcpu);
3391         kvm_clear_interrupt_queue(&svm->vcpu);
3392
3393         svm->nested.nested_cr3 = 0;
3394
3395         /* Restore selected save entries */
3396         svm->vmcb->save.es = hsave->save.es;
3397         svm->vmcb->save.cs = hsave->save.cs;
3398         svm->vmcb->save.ss = hsave->save.ss;
3399         svm->vmcb->save.ds = hsave->save.ds;
3400         svm->vmcb->save.gdtr = hsave->save.gdtr;
3401         svm->vmcb->save.idtr = hsave->save.idtr;
3402         kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
3403         svm_set_efer(&svm->vcpu, hsave->save.efer);
3404         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3405         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3406         if (npt_enabled) {
3407                 svm->vmcb->save.cr3 = hsave->save.cr3;
3408                 svm->vcpu.arch.cr3 = hsave->save.cr3;
3409         } else {
3410                 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
3411         }
3412         kvm_rax_write(&svm->vcpu, hsave->save.rax);
3413         kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
3414         kvm_rip_write(&svm->vcpu, hsave->save.rip);
3415         svm->vmcb->save.dr7 = 0;
3416         svm->vmcb->save.cpl = 0;
3417         svm->vmcb->control.exit_int_info = 0;
3418
3419         mark_all_dirty(svm->vmcb);
3420
3421         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3422
3423         nested_svm_uninit_mmu_context(&svm->vcpu);
3424         kvm_mmu_reset_context(&svm->vcpu);
3425         kvm_mmu_load(&svm->vcpu);
3426
3427         /*
3428          * Drop what we picked up for L2 via svm_complete_interrupts() so it
3429          * doesn't end up in L1.
3430          */
3431         svm->vcpu.arch.nmi_injected = false;
3432         kvm_clear_exception_queue(&svm->vcpu);
3433         kvm_clear_interrupt_queue(&svm->vcpu);
3434
3435         return 0;
3436 }
3437
3438 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
3439 {
3440         /*
3441          * This function merges the msr permission bitmaps of kvm and the
3442          * nested vmcb. It is optimized in that it only merges the parts where
3443          * the kvm msr permission bitmap may contain zero bits
3444          */
3445         int i;
3446
3447         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3448                 return true;
3449
3450         for (i = 0; i < MSRPM_OFFSETS; i++) {
3451                 u32 value, p;
3452                 u64 offset;
3453
3454                 if (msrpm_offsets[i] == 0xffffffff)
3455                         break;
3456
3457                 p      = msrpm_offsets[i];
3458                 offset = svm->nested.vmcb_msrpm + (p * 4);
3459
3460                 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
3461                         return false;
3462
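                     /* Intercept the MSR if either the host (L0) or L1 wants to. */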
3463                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3464         }
3465
3466         svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
3467
3468         return true;
3469 }
3470
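     /*
      * Minimal consistency checks on the VMCB provided by L1; a failure is
      * reported back to L1 as SVM_EXIT_ERR.
      */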
3471 static bool nested_vmcb_checks(struct vmcb *vmcb)
3472 {
3473         if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3474                 return false;
3475
3476         if (vmcb->control.asid == 0)
3477                 return false;
3478
3479         if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3480             !npt_enabled)
3481                 return false;
3482
3483         return true;
3484 }
3485
3486 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3487                                  struct vmcb *nested_vmcb, struct kvm_host_map *map)
3488 {
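             /* Remember whether L1 had interrupts enabled (EFLAGS.IF) at VMRUN. */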
3489         if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
3490                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3491         else
3492                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3493
3494         if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
3495                 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3496                 nested_svm_init_mmu_context(&svm->vcpu);
3497         }
3498
3499         /* Load the nested guest state */
3500         svm->vmcb->save.es = nested_vmcb->save.es;
3501         svm->vmcb->save.cs = nested_vmcb->save.cs;
3502         svm->vmcb->save.ss = nested_vmcb->save.ss;
3503         svm->vmcb->save.ds = nested_vmcb->save.ds;
3504         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3505         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
3506         kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
3507         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3508         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3509         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3510         if (npt_enabled) {
3511                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3512                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
3513         } else
3514                 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
3515
3516         /* Guest paging mode is active - reset mmu */
3517         kvm_mmu_reset_context(&svm->vcpu);
3518
3519         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3520         kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
3521         kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
3522         kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
3523
3524         /* In case we don't even reach vcpu_run, the fields are not updated */
3525         svm->vmcb->save.rax = nested_vmcb->save.rax;
3526         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3527         svm->vmcb->save.rip = nested_vmcb->save.rip;
3528         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3529         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3530         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3531
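             /* The permission map addresses are used 4K aligned; mask off the low bits. */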
3532         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
3533         svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
3534
3535         /* cache intercepts */
3536         svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
3537         svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
3538         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3539         svm->nested.intercept            = nested_vmcb->control.intercept;
3540
3541         svm_flush_tlb(&svm->vcpu, true);
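             /*
              * Run L2 with V_INTR_MASKING always set and remember whether L1
              * actually asked for it; the #VMEXIT path undoes this.
              */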
3542         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3543         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3544                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3545         else
3546                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3547
3548         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3549                 /* We only want the cr8 intercept bits of the guest */
3550                 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3551                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3552         }
3553
3554         /* We don't want to see VMMCALLs from a nested guest */
3555         clr_intercept(svm, INTERCEPT_VMMCALL);
3556
3557         svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
3558         svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
3559
3560         svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3561         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3562         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3563         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3564         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3565
3566         svm->vmcb->control.pause_filter_count =
3567                 nested_vmcb->control.pause_filter_count;
3568         svm->vmcb->control.pause_filter_thresh =
3569                 nested_vmcb->control.pause_filter_thresh;
3570
3571         kvm_vcpu_unmap(&svm->vcpu, map, true);
3572
3573         /* Enter Guest-Mode */
3574         enter_guest_mode(&svm->vcpu);
3575
3576         /*
3577          * Merge guest and host intercepts - must be called with vcpu in
3578          * guest-mode to take effect here
3579          */
3580         recalc_intercepts(svm);
3581
3582         svm->nested.vmcb = vmcb_gpa;
3583
3584         enable_gif(svm);
3585
3586         mark_all_dirty(svm->vmcb);
3587 }
3588
3589 static int nested_svm_vmrun(struct vcpu_svm *svm)
3590 {
3591         int ret;
3592         struct vmcb *nested_vmcb;
3593         struct vmcb *hsave = svm->nested.hsave;
3594         struct vmcb *vmcb = svm->vmcb;
3595         struct kvm_host_map map;
3596         u64 vmcb_gpa;
3597
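             /* VMRUN takes the guest physical address of the VMCB in rAX. */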
3598         vmcb_gpa = svm->vmcb->save.rax;
3599
3600         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
3601         if (ret == -EINVAL) {
3602                 kvm_inject_gp(&svm->vcpu, 0);
3603                 return 1;
3604         } else if (ret) {
3605                 return kvm_skip_emulated_instruction(&svm->vcpu);
3606         }
3607
3608         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3609
3610         nested_vmcb = map.hva;
3611
3612         if (!nested_vmcb_checks(nested_vmcb)) {
3613                 nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
3614                 nested_vmcb->control.exit_code_hi = 0;
3615                 nested_vmcb->control.exit_info_1  = 0;
3616                 nested_vmcb->control.exit_info_2  = 0;
3617
3618                 kvm_vcpu_unmap(&svm->vcpu, &map, true);
3619
3620                 return ret;
3621         }
3622
3623         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3624                                nested_vmcb->save.rip,
3625                                nested_vmcb->control.int_ctl,
3626                                nested_vmcb->control.event_inj,
3627                                nested_vmcb->control.nested_ctl);
3628
3629         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3630                                     nested_vmcb->control.intercept_cr >> 16,
3631                                     nested_vmcb->control.intercept_exceptions,
3632                                     nested_vmcb->control.intercept);
3633
3634         /* Clear internal status */
3635         kvm_clear_exception_queue(&svm->vcpu);
3636         kvm_clear_interrupt_queue(&svm->vcpu);
3637
3638         /*
3639          * Save the old vmcb, so we don't need to pick what we save, but can
3640          * restore everything when a VMEXIT occurs
3641          */
3642         hsave->save.es     = vmcb->save.es;
3643         hsave->save.cs     = vmcb->save.cs;
3644         hsave->save.ss     = vmcb->save.ss;
3645         hsave->save.ds     = vmcb->save.ds;
3646         hsave->save.gdtr   = vmcb->save.gdtr;
3647         hsave->save.idtr   = vmcb->save.idtr;
3648         hsave->save.efer   = svm->vcpu.arch.efer;
3649         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
3650         hsave->save.cr4    = svm->vcpu.arch.cr4;
3651         hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3652         hsave->save.rip    = kvm_rip_read(&svm->vcpu);
3653         hsave->save.rsp    = vmcb->save.rsp;
3654         hsave->save.rax    = vmcb->save.rax;
3655         if (npt_enabled)
3656                 hsave->save.cr3    = vmcb->save.cr3;
3657         else
3658                 hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
3659
3660         copy_vmcb_control_area(hsave, vmcb);
3661
3662         enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
3663
3664         if (!nested_svm_vmrun_msrpm(svm)) {
3665                 svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
3666                 svm->vmcb->control.exit_code_hi = 0;
3667                 svm->vmcb->control.exit_info_1  = 0;
3668                 svm->vmcb->control.exit_info_2  = 0;
3669
3670                 nested_svm_vmexit(svm);
3671         }
3672
3673         return ret;
3674 }
3675
3676 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
3677 {
3678         to_vmcb->save.fs = from_vmcb->save.fs;
3679         to_vmcb->save.gs = from_vmcb->save.gs;
3680         to_vmcb->save.tr = from_vmcb->save.tr;
3681         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3682         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3683         to_vmcb->save.star = from_vmcb->save.star;
3684         to_vmcb->save.lstar = from_vmcb->save.lstar;
3685         to_vmcb->save.cstar = from_vmcb->save.cstar;
3686         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3687         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3688         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3689         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
3690 }
3691
3692 static int vmload_interception(struct vcpu_svm *svm)
3693 {
3694         struct vmcb *nested_vmcb;
3695         struct kvm_host_map map;
3696         int ret;
3697
3698         if (nested_svm_check_permissions(svm))
3699                 return 1;
3700
3701         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3702         if (ret) {
3703                 if (ret == -EINVAL)
3704                         kvm_inject_gp(&svm->vcpu, 0);
3705                 return 1;
3706         }
3707
3708         nested_vmcb = map.hva;
3709
3710         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3711
3712         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
3713         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3714
3715         return ret;
3716 }
3717
3718 static int vmsave_interception(struct vcpu_svm *svm)
3719 {
3720         struct vmcb *nested_vmcb;
3721         struct kvm_host_map map;
3722         int ret;
3723
3724         if (nested_svm_check_permissions(svm))
3725                 return 1;
3726
3727         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3728         if (ret) {
3729                 if (ret == -EINVAL)
3730                         kvm_inject_gp(&svm->vcpu, 0);
3731                 return 1;
3732         }
3733
3734         nested_vmcb = map.hva;
3735
3736         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3737
3738         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
3739         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3740
3741         return ret;
3742 }
3743
3744 static int vmrun_interception(struct vcpu_svm *svm)
3745 {
3746         if (nested_svm_check_permissions(svm))
3747                 return 1;
3748
3749         return nested_svm_vmrun(svm);
3750 }
3751
3752 static int stgi_interception(struct vcpu_svm *svm)
3753 {
3754         int ret;
3755
3756         if (nested_svm_check_permissions(svm))
3757                 return 1;
3758
3759         /*
3760          * If VGIF is enabled, the STGI intercept is only added to
3761          * detect the opening of the SMI/NMI window; remove it now.
3762          */
3763         if (vgif_enabled(svm))
3764                 clr_intercept(svm, INTERCEPT_STGI);
3765
3766         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3767         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3768
3769         enable_gif(svm);
3770
3771         return ret;
3772 }
3773
3774 static int clgi_interception(struct vcpu_svm *svm)
3775 {
3776         int ret;
3777
3778         if (nested_svm_check_permissions(svm))
3779                 return 1;
3780
3781         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3782
3783         disable_gif(svm);
3784
3785         /* After a CLGI no interrupts should be delivered */
3786         if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3787                 svm_clear_vintr(svm);
3788                 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3789                 mark_dirty(svm->vmcb, VMCB_INTR);
3790         }
3791
3792         return ret;
3793 }
3794
3795 static int invlpga_interception(struct vcpu_svm *svm)
3796 {
3797         struct kvm_vcpu *vcpu = &svm->vcpu;
3798
3799         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
3800                           kvm_rax_read(&svm->vcpu));
3801
3802         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3803         kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
3804
3805         return kvm_skip_emulated_instruction(&svm->vcpu);
3806 }
3807
3808 static int skinit_interception(struct vcpu_svm *svm)
3809 {
3810         trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
3811
3812         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3813         return 1;
3814 }
3815
3816 static int wbinvd_interception(struct vcpu_svm *svm)
3817 {
3818         return kvm_emulate_wbinvd(&svm->vcpu);
3819 }
3820
3821 static int xsetbv_interception(struct vcpu_svm *svm)
3822 {
3823         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3824         u32 index = kvm_rcx_read(&svm->vcpu);
3825
3826         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3827                 return kvm_skip_emulated_instruction(&svm->vcpu);
3828         }
3829
3830         return 1;
3831 }
3832
3833 static int task_switch_interception(struct vcpu_svm *svm)
3834 {
3835         u16 tss_selector;
3836         int reason;
3837         int int_type = svm->vmcb->control.exit_int_info &
3838                 SVM_EXITINTINFO_TYPE_MASK;
3839         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
3840         uint32_t type =
3841                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3842         uint32_t idt_v =
3843                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
3844         bool has_error_code = false;
3845         u32 error_code = 0;
3846
3847         tss_selector = (u16)svm->vmcb->control.exit_info_1;
3848
3849         if (svm->vmcb->control.exit_info_2 &
3850             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
3851                 reason = TASK_SWITCH_IRET;
3852         else if (svm->vmcb->control.exit_info_2 &
3853                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3854                 reason = TASK_SWITCH_JMP;
3855         else if (idt_v)
3856                 reason = TASK_SWITCH_GATE;
3857         else
3858                 reason = TASK_SWITCH_CALL;
3859
3860         if (reason == TASK_SWITCH_GATE) {
3861                 switch (type) {
3862                 case SVM_EXITINTINFO_TYPE_NMI:
3863                         svm->vcpu.arch.nmi_injected = false;
3864                         break;
3865                 case SVM_EXITINTINFO_TYPE_EXEPT:
3866                         if (svm->vmcb->control.exit_info_2 &
3867                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3868                                 has_error_code = true;
3869                                 error_code =
3870                                         (u32)svm->vmcb->control.exit_info_2;
3871                         }
3872                         kvm_clear_exception_queue(&svm->vcpu);
3873                         break;
3874                 case SVM_EXITINTINFO_TYPE_INTR:
3875                         kvm_clear_interrupt_queue(&svm->vcpu);
3876                         break;
3877                 default:
3878                         break;
3879                 }
3880         }
3881
3882         if (reason != TASK_SWITCH_GATE ||
3883             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3884             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
3885              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
3886                 if (skip_emulated_instruction(&svm->vcpu) != EMULATE_DONE)
3887                         goto fail;
3888         }
3889
3890         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3891                 int_vec = -1;
3892
3893         if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
3894                                 has_error_code, error_code) == EMULATE_FAIL)
3895                 goto fail;
3896
3897         return 1;
3898
3899 fail:
3900         svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3901         svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3902         svm->vcpu.run->internal.ndata = 0;
3903         return 0;
3904 }
3905
3906 static int cpuid_interception(struct vcpu_svm *svm)
3907 {
3908         return kvm_emulate_cpuid(&svm->vcpu);
3909 }
3910
3911 static int iret_interception(struct vcpu_svm *svm)
3912 {
3913         ++svm->vcpu.stat.nmi_window_exits;
3914         clr_intercept(svm, INTERCEPT_IRET);
3915         svm->vcpu.arch.hflags |= HF_IRET_MASK;
3916         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
3917         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3918         return 1;
3919 }
3920
3921 static int invlpg_interception(struct vcpu_svm *svm)
3922 {
3923         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3924                 return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3925
3926         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3927         return kvm_skip_emulated_instruction(&svm->vcpu);
3928 }
3929
3930 static int emulate_on_interception(struct vcpu_svm *svm)
3931 {
3932         return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3933 }
3934
3935 static int rsm_interception(struct vcpu_svm *svm)
3936 {
3937         return kvm_emulate_instruction_from_buffer(&svm->vcpu,
3938                                         rsm_ins_bytes, 2) == EMULATE_DONE;
3939 }
3940
3941 static int rdpmc_interception(struct vcpu_svm *svm)
3942 {
3943         int err;
3944
3945         if (!nrips)
3946                 return emulate_on_interception(svm);
3947
3948         err = kvm_rdpmc(&svm->vcpu);
3949         return kvm_complete_insn_gp(&svm->vcpu, err);
3950 }
3951
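     /*
      * The selective CR0 write intercept only fires when bits other than
      * CR0.TS and CR0.MP change; forward it to L1 if L1 asked for it.
      */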
3952 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3953                                             unsigned long val)
3954 {
3955         unsigned long cr0 = svm->vcpu.arch.cr0;
3956         bool ret = false;
3957         u64 intercept;
3958
3959         intercept = svm->nested.intercept;
3960
3961         if (!is_guest_mode(&svm->vcpu) ||
3962             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3963                 return false;
3964
3965         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3966         val &= ~SVM_CR0_SELECTIVE_MASK;
3967
3968         if (cr0 ^ val) {
3969                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3970                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3971         }
3972
3973         return ret;
3974 }
3975
3976 #define CR_VALID (1ULL << 63)
3977
3978 static int cr_interception(struct vcpu_svm *svm)
3979 {
3980         int reg, cr;
3981         unsigned long val;
3982         int err;
3983
3984         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3985                 return emulate_on_interception(svm);
3986
3987         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3988                 return emulate_on_interception(svm);
3989
3990         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3991         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3992                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3993         else
3994                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
3995
3996         err = 0;
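             /* The CR write exit codes sit 16 above their read counterparts. */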
3997         if (cr >= 16) { /* mov to cr */
3998                 cr -= 16;
3999                 val = kvm_register_read(&svm->vcpu, reg);
4000                 switch (cr) {
4001                 case 0:
4002                         if (!check_selective_cr0_intercepted(svm, val))
4003                                 err = kvm_set_cr0(&svm->vcpu, val);
4004                         else
4005                                 return 1;
4006
4007                         break;
4008                 case 3:
4009                         err = kvm_set_cr3(&svm->vcpu, val);
4010                         break;
4011                 case 4:
4012                         err = kvm_set_cr4(&svm->vcpu, val);
4013                         break;
4014                 case 8:
4015                         err = kvm_set_cr8(&svm->vcpu, val);
4016                         break;
4017                 default:
4018                         WARN(1, "unhandled write to CR%d", cr);
4019                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4020                         return 1;
4021                 }
4022         } else { /* mov from cr */
4023                 switch (cr) {
4024                 case 0:
4025                         val = kvm_read_cr0(&svm->vcpu);
4026                         break;
4027                 case 2:
4028                         val = svm->vcpu.arch.cr2;
4029                         break;
4030                 case 3:
4031                         val = kvm_read_cr3(&svm->vcpu);
4032                         break;
4033                 case 4:
4034                         val = kvm_read_cr4(&svm->vcpu);
4035                         break;
4036                 case 8:
4037                         val = kvm_get_cr8(&svm->vcpu);
4038                         break;
4039                 default:
4040                         WARN(1, "unhandled read from CR%d", cr);
4041                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4042                         return 1;
4043                 }
4044                 kvm_register_write(&svm->vcpu, reg, val);
4045         }
4046         return kvm_complete_insn_gp(&svm->vcpu, err);
4047 }
4048
4049 static int dr_interception(struct vcpu_svm *svm)
4050 {
4051         int reg, dr;
4052         unsigned long val;
4053
4054         if (svm->vcpu.guest_debug == 0) {
4055                 /*
4056                  * No more DR vmexits; force a reload of the debug registers
4057                  * and reenter on this instruction.  The next vmexit will
4058                  * retrieve the full state of the debug registers.
4059                  */
4060                 clr_dr_intercepts(svm);
4061                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
4062                 return 1;
4063         }
4064
4065         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
4066                 return emulate_on_interception(svm);
4067
4068         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
4069         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
4070
4071         if (dr >= 16) { /* mov to DRn */
4072                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
4073                         return 1;
4074                 val = kvm_register_read(&svm->vcpu, reg);
4075                 kvm_set_dr(&svm->vcpu, dr - 16, val);
4076         } else {
4077                 if (!kvm_require_dr(&svm->vcpu, dr))
4078                         return 1;
4079                 kvm_get_dr(&svm->vcpu, dr, &val);
4080                 kvm_register_write(&svm->vcpu, reg, val);
4081         }
4082
4083         return kvm_skip_emulated_instruction(&svm->vcpu);
4084 }
4085
4086 static int cr8_write_interception(struct vcpu_svm *svm)
4087 {
4088         struct kvm_run *kvm_run = svm->vcpu.run;
4089         int r;
4090
4091         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
4092         /* instruction emulation calls kvm_set_cr8() */
4093         r = cr_interception(svm);
4094         if (lapic_in_kernel(&svm->vcpu))
4095                 return r;
4096         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
4097                 return r;
4098         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
4099         return 0;
4100 }
4101
4102 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
4103 {
4104         msr->data = 0;
4105
4106         switch (msr->index) {
4107         case MSR_F10H_DECFG:
4108                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
4109                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
4110                 break;
4111         default:
4112                 return 1;
4113         }
4114
4115         return 0;
4116 }
4117
4118 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4119 {
4120         struct vcpu_svm *svm = to_svm(vcpu);
4121
4122         switch (msr_info->index) {
4123         case MSR_STAR:
4124                 msr_info->data = svm->vmcb->save.star;
4125                 break;
4126 #ifdef CONFIG_X86_64
4127         case MSR_LSTAR:
4128                 msr_info->data = svm->vmcb->save.lstar;
4129                 break;
4130         case MSR_CSTAR:
4131                 msr_info->data = svm->vmcb->save.cstar;
4132                 break;
4133         case MSR_KERNEL_GS_BASE:
4134                 msr_info->data = svm->vmcb->save.kernel_gs_base;
4135                 break;
4136         case MSR_SYSCALL_MASK:
4137                 msr_info->data = svm->vmcb->save.sfmask;
4138                 break;
4139 #endif
4140         case MSR_IA32_SYSENTER_CS:
4141                 msr_info->data = svm->vmcb->save.sysenter_cs;
4142                 break;
4143         case MSR_IA32_SYSENTER_EIP:
4144                 msr_info->data = svm->sysenter_eip;
4145                 break;
4146         case MSR_IA32_SYSENTER_ESP:
4147                 msr_info->data = svm->sysenter_esp;
4148                 break;
4149         case MSR_TSC_AUX:
4150                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4151                         return 1;
4152                 msr_info->data = svm->tsc_aux;
4153                 break;
4154         /*
4155          * Nobody will change the following 5 values in the VMCB so we can
4156          * safely return them on rdmsr. They will always be 0 until LBRV is
4157          * implemented.
4158          */
4159         case MSR_IA32_DEBUGCTLMSR:
4160                 msr_info->data = svm->vmcb->save.dbgctl;
4161                 break;
4162         case MSR_IA32_LASTBRANCHFROMIP:
4163                 msr_info->data = svm->vmcb->save.br_from;
4164                 break;
4165         case MSR_IA32_LASTBRANCHTOIP:
4166                 msr_info->data = svm->vmcb->save.br_to;
4167                 break;
4168         case MSR_IA32_LASTINTFROMIP:
4169                 msr_info->data = svm->vmcb->save.last_excp_from;
4170                 break;
4171         case MSR_IA32_LASTINTTOIP:
4172                 msr_info->data = svm->vmcb->save.last_excp_to;
4173                 break;
4174         case MSR_VM_HSAVE_PA:
4175                 msr_info->data = svm->nested.hsave_msr;
4176                 break;
4177         case MSR_VM_CR:
4178                 msr_info->data = svm->nested.vm_cr_msr;
4179                 break;
4180         case MSR_IA32_SPEC_CTRL:
4181                 if (!msr_info->host_initiated &&
4182                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4183                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4184                         return 1;
4185
4186                 msr_info->data = svm->spec_ctrl;
4187                 break;
4188         case MSR_AMD64_VIRT_SPEC_CTRL:
4189                 if (!msr_info->host_initiated &&
4190                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4191                         return 1;
4192
4193                 msr_info->data = svm->virt_spec_ctrl;
4194                 break;
4195         case MSR_F15H_IC_CFG: {
4196
4197                 int family, model;
4198
4199                 family = guest_cpuid_family(vcpu);
4200                 model  = guest_cpuid_model(vcpu);
4201
4202                 if (family < 0 || model < 0)
4203                         return kvm_get_msr_common(vcpu, msr_info);
4204
4205                 msr_info->data = 0;
4206
4207                 if (family == 0x15 &&
4208                     (model >= 0x2 && model < 0x20))
4209                         msr_info->data = 0x1E;
4210                 }
4211                 break;
4212         case MSR_F10H_DECFG:
4213                 msr_info->data = svm->msr_decfg;
4214                 break;
4215         default:
4216                 return kvm_get_msr_common(vcpu, msr_info);
4217         }
4218         return 0;
4219 }
4220
4221 static int rdmsr_interception(struct vcpu_svm *svm)
4222 {
4223         return kvm_emulate_rdmsr(&svm->vcpu);
4224 }
4225
4226 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
4227 {
4228         struct vcpu_svm *svm = to_svm(vcpu);
4229         int svm_dis, chg_mask;
4230
4231         if (data & ~SVM_VM_CR_VALID_MASK)
4232                 return 1;
4233
4234         chg_mask = SVM_VM_CR_VALID_MASK;
4235
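             /* Once SVMDIS is set, neither SVMDIS nor LOCK can be changed anymore. */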
4236         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
4237                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
4238
4239         svm->nested.vm_cr_msr &= ~chg_mask;
4240         svm->nested.vm_cr_msr |= (data & chg_mask);
4241
4242         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
4243
4244         /* check for svm_disable while efer.svme is set */
4245         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
4246                 return 1;
4247
4248         return 0;
4249 }
4250
4251 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4252 {
4253         struct vcpu_svm *svm = to_svm(vcpu);
4254
4255         u32 ecx = msr->index;
4256         u64 data = msr->data;
4257         switch (ecx) {
4258         case MSR_IA32_CR_PAT:
4259                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4260                         return 1;
4261                 vcpu->arch.pat = data;
4262                 svm->vmcb->save.g_pat = data;
4263                 mark_dirty(svm->vmcb, VMCB_NPT);
4264                 break;
4265         case MSR_IA32_SPEC_CTRL:
4266                 if (!msr->host_initiated &&
4267                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4268                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4269                         return 1;
4270
4271                 /* The STIBP bit doesn't fault even if it's not advertised */
4272                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4273                         return 1;
4274
4275                 svm->spec_ctrl = data;
4276
4277                 if (!data)
4278                         break;
4279
4280                 /*
4281                  * For non-nested:
4282                  * When it's written (to non-zero) for the first time, pass
4283                  * it through.
4284                  *
4285                  * For nested:
4286                  * The handling of the MSR bitmap for L2 guests is done in
4287                  * nested_svm_vmrun_msrpm.
4288                  * We update the L1 MSR bit as well since it will end up
4289                  * touching the MSR anyway now.
4290                  */
4291                 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
4292                 break;
4293         case MSR_IA32_PRED_CMD:
4294                 if (!msr->host_initiated &&
4295                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
4296                         return 1;
4297
4298                 if (data & ~PRED_CMD_IBPB)
4299                         return 1;
4300
4301                 if (!data)
4302                         break;
4303
4304                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4305                 if (is_guest_mode(vcpu))
4306                         break;
4307                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
4308                 break;
4309         case MSR_AMD64_VIRT_SPEC_CTRL:
4310                 if (!msr->host_initiated &&
4311                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4312                         return 1;
4313
4314                 if (data & ~SPEC_CTRL_SSBD)
4315                         return 1;
4316
4317                 svm->virt_spec_ctrl = data;
4318                 break;
4319         case MSR_STAR:
4320                 svm->vmcb->save.star = data;
4321                 break;
4322 #ifdef CONFIG_X86_64
4323         case MSR_LSTAR:
4324                 svm->vmcb->save.lstar = data;
4325                 break;
4326         case MSR_CSTAR:
4327                 svm->vmcb->save.cstar = data;
4328                 break;
4329         case MSR_KERNEL_GS_BASE:
4330                 svm->vmcb->save.kernel_gs_base = data;
4331                 break;
4332         case MSR_SYSCALL_MASK:
4333                 svm->vmcb->save.sfmask = data;
4334                 break;
4335 #endif
4336         case MSR_IA32_SYSENTER_CS:
4337                 svm->vmcb->save.sysenter_cs = data;
4338                 break;
4339         case MSR_IA32_SYSENTER_EIP:
4340                 svm->sysenter_eip = data;
4341                 svm->vmcb->save.sysenter_eip = data;
4342                 break;
4343         case MSR_IA32_SYSENTER_ESP:
4344                 svm->sysenter_esp = data;
4345                 svm->vmcb->save.sysenter_esp = data;
4346                 break;
4347         case MSR_TSC_AUX:
4348                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4349                         return 1;
4350
4351                 /*
4352                  * This is rare, so we update the MSR here instead of using
4353                  * direct_access_msrs.  Doing that would require a rdmsr in
4354                  * svm_vcpu_put.
4355                  */
4356                 svm->tsc_aux = data;
4357                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4358                 break;
4359         case MSR_IA32_DEBUGCTLMSR:
4360                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
4361                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4362                                     __func__, data);
4363                         break;
4364                 }
4365                 if (data & DEBUGCTL_RESERVED_BITS)
4366                         return 1;
4367
4368                 svm->vmcb->save.dbgctl = data;
4369                 mark_dirty(svm->vmcb, VMCB_LBR);
4370                 if (data & (1ULL<<0))
4371                         svm_enable_lbrv(svm);
4372                 else
4373                         svm_disable_lbrv(svm);
4374                 break;
4375         case MSR_VM_HSAVE_PA:
4376                 svm->nested.hsave_msr = data;
4377                 break;
4378         case MSR_VM_CR:
4379                 return svm_set_vm_cr(vcpu, data);
4380         case MSR_VM_IGNNE:
4381                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
4382                 break;
4383         case MSR_F10H_DECFG: {
4384                 struct kvm_msr_entry msr_entry;
4385
4386                 msr_entry.index = msr->index;
4387                 if (svm_get_msr_feature(&msr_entry))
4388                         return 1;
4389
4390                 /* Check the supported bits */
4391                 if (data & ~msr_entry.data)
4392                         return 1;
4393
4394                 /* Don't allow the guest to change a bit, #GP */
4395                 if (!msr->host_initiated && (data ^ msr_entry.data))
4396                         return 1;
4397
4398                 svm->msr_decfg = data;
4399                 break;
4400         }
4401         case MSR_IA32_APICBASE:
4402                 if (kvm_vcpu_apicv_active(vcpu))
4403                         avic_update_vapic_bar(to_svm(vcpu), data);
4404                 /* Fall through */
4405         default:
4406                 return kvm_set_msr_common(vcpu, msr);
4407         }
4408         return 0;
4409 }
4410
4411 static int wrmsr_interception(struct vcpu_svm *svm)
4412 {
4413         return kvm_emulate_wrmsr(&svm->vcpu);
4414 }
4415
4416 static int msr_interception(struct vcpu_svm *svm)
4417 {
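             /* exit_info_1 is 1 for WRMSR and 0 for RDMSR. */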
4418         if (svm->vmcb->control.exit_info_1)
4419                 return wrmsr_interception(svm);
4420         else
4421                 return rdmsr_interception(svm);
4422 }
4423
4424 static int interrupt_window_interception(struct vcpu_svm *svm)
4425 {
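             /*
              * The interrupt window opened; drop the virtual interrupt request
              * and let the pending interrupt be injected on the next entry.
              */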
4426         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4427         svm_clear_vintr(svm);
4428         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
4429         mark_dirty(svm->vmcb, VMCB_INTR);
4430         ++svm->vcpu.stat.irq_window_exits;
4431         return 1;
4432 }
4433
4434 static int pause_interception(struct vcpu_svm *svm)
4435 {
4436         struct kvm_vcpu *vcpu = &svm->vcpu;
4437         bool in_kernel = (svm_get_cpl(vcpu) == 0);
4438
4439         if (pause_filter_thresh)
4440                 grow_ple_window(vcpu);
4441
4442         kvm_vcpu_on_spin(vcpu, in_kernel);
4443         return 1;
4444 }
4445
4446 static int nop_interception(struct vcpu_svm *svm)
4447 {
4448         return kvm_skip_emulated_instruction(&(svm->vcpu));
4449 }
4450
4451 static int monitor_interception(struct vcpu_svm *svm)
4452 {
4453         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4454         return nop_interception(svm);
4455 }
4456
4457 static int mwait_interception(struct vcpu_svm *svm)
4458 {
4459         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4460         return nop_interception(svm);
4461 }
4462
4463 enum avic_ipi_failure_cause {
4464         AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4465         AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4466         AVIC_IPI_FAILURE_INVALID_TARGET,
4467         AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4468 };
4469
4470 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4471 {
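             /*
              * exit_info_1 carries the ICR being written (high and low words);
              * exit_info_2 carries the failure cause and a table index.
              */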
4472         u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4473         u32 icrl = svm->vmcb->control.exit_info_1;
4474         u32 id = svm->vmcb->control.exit_info_2 >> 32;
4475         u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
4476         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4477
4478         trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4479
4480         switch (id) {
4481         case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4482                 /*
4483                  * AVIC hardware handles the generation of
4484                  * IPIs when the specified Message Type is Fixed
4485                  * (also known as fixed delivery mode) and
4486                  * the Trigger Mode is edge-triggered. The hardware
4487                  * also supports self and broadcast delivery modes
4488                  * specified via the Destination Shorthand(DSH)
4489                  * field of the ICRL. Logical and physical APIC ID
4490                  * formats are supported. All other IPI types cause
4491                  * a #VMEXIT, which needs to be emulated.
4492                  */
4493                 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4494                 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4495                 break;
4496         case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4497                 int i;
4498                 struct kvm_vcpu *vcpu;
4499                 struct kvm *kvm = svm->vcpu.kvm;
4500                 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4501
4502                 /*
4503                  * At this point, we expect that the AVIC HW has already
4504                  * set the appropriate IRR bits on the valid target
4505                  * vcpus. So, we just need to kick the appropriate vcpu.
4506                  */
4507                 kvm_for_each_vcpu(i, vcpu, kvm) {
4508                         bool m = kvm_apic_match_dest(vcpu, apic,
4509                                                      icrl & KVM_APIC_SHORT_MASK,
4510                                                      GET_APIC_DEST_FIELD(icrh),
4511                                                      icrl & KVM_APIC_DEST_MASK);
4512
4513                         if (m && !avic_vcpu_is_running(vcpu))
4514                                 kvm_vcpu_wake_up(vcpu);
4515                 }
4516                 break;
4517         }
4518         case AVIC_IPI_FAILURE_INVALID_TARGET:
4519                 WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
4520                           index, svm->vcpu.vcpu_id, icrh, icrl);
4521                 break;
4522         case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4523                 WARN_ONCE(1, "Invalid backing page\n");
4524                 break;
4525         default:
4526                 pr_err("Unknown IPI interception\n");
4527         }
4528
4529         return 1;
4530 }
4531
4532 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4533 {
4534         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
4535         int index;
4536         u32 *logical_apic_id_table;
4537         int dlid = GET_APIC_LOGICAL_ID(ldr);
4538
4539         if (!dlid)
4540                 return NULL;
4541
4542         if (flat) { /* flat */
4543                 index = ffs(dlid) - 1;
4544                 if (index > 7)
4545                         return NULL;
4546         } else { /* cluster */
4547                 int cluster = (dlid & 0xf0) >> 4;
4548                 int apic = ffs(dlid & 0x0f) - 1;
4549
4550                 if ((apic < 0) || (apic > 7) ||
4551                     (cluster >= 0xf))
4552                         return NULL;
4553                 index = (cluster << 2) + apic;
4554         }
4555
4556         logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
4557
4558         return &logical_apic_id_table[index];
4559 }
4560
4561 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
4562 {
4563         bool flat;
4564         u32 *entry, new_entry;
4565
4566         flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4567         entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4568         if (!entry)
4569                 return -EINVAL;
4570
4571         new_entry = READ_ONCE(*entry);
4572         new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4573         new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4574         new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4575         WRITE_ONCE(*entry, new_entry);
4576
4577         return 0;
4578 }
4579
4580 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
4581 {
4582         struct vcpu_svm *svm = to_svm(vcpu);
4583         bool flat = svm->dfr_reg == APIC_DFR_FLAT;
4584         u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
4585
4586         if (entry)
4587                 clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
4588 }
4589
4590 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4591 {
4592         int ret = 0;
4593         struct vcpu_svm *svm = to_svm(vcpu);
4594         u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4595
4596         if (ldr == svm->ldr_reg)
4597                 return 0;
4598
4599         avic_invalidate_logical_id_entry(vcpu);
4600
4601         if (ldr)
4602                 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
4603
4604         if (!ret)
4605                 svm->ldr_reg = ldr;
4606
4607         return ret;
4608 }
4609
4610 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4611 {
4612         u64 *old, *new;
4613         struct vcpu_svm *svm = to_svm(vcpu);
4614         u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4615         u32 id = (apic_id_reg >> 24) & 0xff;
4616
4617         if (vcpu->vcpu_id == id)
4618                 return 0;
4619
4620         old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4621         new = avic_get_physical_id_entry(vcpu, id);
4622         if (!new || !old)
4623                 return 1;
4624
4625         /* We need to move the physical_id_entry to the new offset */
4626         *new = *old;
4627         *old = 0ULL;
4628         to_svm(vcpu)->avic_physical_id_cache = new;
4629
4630         /*
4631          * Also update the guest physical APIC ID in the logical
4632          * APIC ID table entry if the LDR has already been set up.
4633          */
4634         if (svm->ldr_reg)
4635                 avic_handle_ldr_update(vcpu);
4636
4637         return 0;
4638 }
4639
4640 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4641 {
4642         struct vcpu_svm *svm = to_svm(vcpu);
4643         u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4644
4645         if (svm->dfr_reg == dfr)
4646                 return;
4647
4648         avic_invalidate_logical_id_entry(vcpu);
4649         svm->dfr_reg = dfr;
4650 }
4651
4652 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4653 {
4654         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4655         u32 offset = svm->vmcb->control.exit_info_1 &
4656                                 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4657
4658         switch (offset) {
4659         case APIC_ID:
4660                 if (avic_handle_apic_id_update(&svm->vcpu))
4661                         return 0;
4662                 break;
4663         case APIC_LDR:
4664                 if (avic_handle_ldr_update(&svm->vcpu))
4665                         return 0;
4666                 break;
4667         case APIC_DFR:
4668                 avic_handle_dfr_update(&svm->vcpu);
4669                 break;
4670         default:
4671                 break;
4672         }
4673
4674         kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4675
4676         return 1;
4677 }
4678
4679 static bool is_avic_unaccelerated_access_trap(u32 offset)
4680 {
4681         bool ret = false;
4682
4683         switch (offset) {
4684         case APIC_ID:
4685         case APIC_EOI:
4686         case APIC_RRR:
4687         case APIC_LDR:
4688         case APIC_DFR:
4689         case APIC_SPIV:
4690         case APIC_ESR:
4691         case APIC_ICR:
4692         case APIC_LVTT:
4693         case APIC_LVTTHMR:
4694         case APIC_LVTPC:
4695         case APIC_LVT0:
4696         case APIC_LVT1:
4697         case APIC_LVTERR:
4698         case APIC_TMICT:
4699         case APIC_TDCR:
4700                 ret = true;
4701                 break;
4702         default:
4703                 break;
4704         }
4705         return ret;
4706 }
4707
4708 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4709 {
4710         int ret = 0;
4711         u32 offset = svm->vmcb->control.exit_info_1 &
4712                      AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4713         u32 vector = svm->vmcb->control.exit_info_2 &
4714                      AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4715         bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4716                      AVIC_UNACCEL_ACCESS_WRITE_MASK;
4717         bool trap = is_avic_unaccelerated_access_trap(offset);
4718
4719         trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4720                                             trap, write, vector);
4721         if (trap) {
4722                 /* Handling Trap */
4723                 WARN_ONCE(!write, "svm: Handling trap read.\n");
4724                 ret = avic_unaccel_trap_write(svm);
4725         } else {
4726                 /* Handling Fault */
4727                 ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4728         }
4729
4730         return ret;
4731 }
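
/*
 * To illustrate the split above: a "trap" exit means the hardware has
 * already completed the guest's APIC register write, so
 * avic_unaccel_trap_write() only has to react to the new value (e.g. an
 * APIC_LDR write refreshes the logical APIC ID table via
 * avic_handle_ldr_update()) and then replays the write into the in-kernel
 * lapic state.  A "fault" exit means the access has not been performed,
 * so the instruction is emulated instead.
 */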
4732
4733 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4734         [SVM_EXIT_READ_CR0]                     = cr_interception,
4735         [SVM_EXIT_READ_CR3]                     = cr_interception,
4736         [SVM_EXIT_READ_CR4]                     = cr_interception,
4737         [SVM_EXIT_READ_CR8]                     = cr_interception,
4738         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
4739         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
4740         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
4741         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
4742         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
4743         [SVM_EXIT_READ_DR0]                     = dr_interception,
4744         [SVM_EXIT_READ_DR1]                     = dr_interception,
4745         [SVM_EXIT_READ_DR2]                     = dr_interception,
4746         [SVM_EXIT_READ_DR3]                     = dr_interception,
4747         [SVM_EXIT_READ_DR4]                     = dr_interception,
4748         [SVM_EXIT_READ_DR5]                     = dr_interception,
4749         [SVM_EXIT_READ_DR6]                     = dr_interception,
4750         [SVM_EXIT_READ_DR7]                     = dr_interception,
4751         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
4752         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
4753         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
4754         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
4755         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
4756         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
4757         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
4758         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
4759         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
4760         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
4761         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
4762         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
4763         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
4764         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
4765         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
4766         [SVM_EXIT_INTR]                         = intr_interception,
4767         [SVM_EXIT_NMI]                          = nmi_interception,
4768         [SVM_EXIT_SMI]                          = nop_on_interception,
4769         [SVM_EXIT_INIT]                         = nop_on_interception,
4770         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
4771         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
4772         [SVM_EXIT_CPUID]                        = cpuid_interception,
4773         [SVM_EXIT_IRET]                         = iret_interception,
4774         [SVM_EXIT_INVD]                         = emulate_on_interception,
4775         [SVM_EXIT_PAUSE]                        = pause_interception,
4776         [SVM_EXIT_HLT]                          = halt_interception,
4777         [SVM_EXIT_INVLPG]                       = invlpg_interception,
4778         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
4779         [SVM_EXIT_IOIO]                         = io_interception,
4780         [SVM_EXIT_MSR]                          = msr_interception,
4781         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
4782         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
4783         [SVM_EXIT_VMRUN]                        = vmrun_interception,
4784         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
4785         [SVM_EXIT_VMLOAD]                       = vmload_interception,
4786         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
4787         [SVM_EXIT_STGI]                         = stgi_interception,
4788         [SVM_EXIT_CLGI]                         = clgi_interception,
4789         [SVM_EXIT_SKINIT]                       = skinit_interception,
4790         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
4791         [SVM_EXIT_MONITOR]                      = monitor_interception,
4792         [SVM_EXIT_MWAIT]                        = mwait_interception,
4793         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
4794         [SVM_EXIT_NPF]                          = npf_interception,
4795         [SVM_EXIT_RSM]                          = rsm_interception,
4796         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
4797         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
4798 };
4799
4800 static void dump_vmcb(struct kvm_vcpu *vcpu)
4801 {
4802         struct vcpu_svm *svm = to_svm(vcpu);
4803         struct vmcb_control_area *control = &svm->vmcb->control;
4804         struct vmcb_save_area *save = &svm->vmcb->save;
4805
4806         if (!dump_invalid_vmcb) {
4807                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
4808                 return;
4809         }
4810
4811         pr_err("VMCB Control Area:\n");
4812         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4813         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4814         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4815         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4816         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4817         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4818         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4819         pr_err("%-20s%d\n", "pause filter threshold:",
4820                control->pause_filter_thresh);
4821         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4822         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4823         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4824         pr_err("%-20s%d\n", "asid:", control->asid);
4825         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4826         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4827         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4828         pr_err("%-20s%08x\n", "int_state:", control->int_state);
4829         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4830         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4831         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4832         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4833         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4834         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4835         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
4836         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
4837         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4838         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4839         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
4840         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
4841         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4842         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4843         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
4844         pr_err("VMCB State Save Area:\n");
4845         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4846                "es:",
4847                save->es.selector, save->es.attrib,
4848                save->es.limit, save->es.base);
4849         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4850                "cs:",
4851                save->cs.selector, save->cs.attrib,
4852                save->cs.limit, save->cs.base);
4853         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4854                "ss:",
4855                save->ss.selector, save->ss.attrib,
4856                save->ss.limit, save->ss.base);
4857         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4858                "ds:",
4859                save->ds.selector, save->ds.attrib,
4860                save->ds.limit, save->ds.base);
4861         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4862                "fs:",
4863                save->fs.selector, save->fs.attrib,
4864                save->fs.limit, save->fs.base);
4865         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4866                "gs:",
4867                save->gs.selector, save->gs.attrib,
4868                save->gs.limit, save->gs.base);
4869         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4870                "gdtr:",
4871                save->gdtr.selector, save->gdtr.attrib,
4872                save->gdtr.limit, save->gdtr.base);
4873         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4874                "ldtr:",
4875                save->ldtr.selector, save->ldtr.attrib,
4876                save->ldtr.limit, save->ldtr.base);
4877         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4878                "idtr:",
4879                save->idtr.selector, save->idtr.attrib,
4880                save->idtr.limit, save->idtr.base);
4881         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4882                "tr:",
4883                save->tr.selector, save->tr.attrib,
4884                save->tr.limit, save->tr.base);
4885         pr_err("cpl:            %d                efer:         %016llx\n",
4886                 save->cpl, save->efer);
4887         pr_err("%-15s %016llx %-13s %016llx\n",
4888                "cr0:", save->cr0, "cr2:", save->cr2);
4889         pr_err("%-15s %016llx %-13s %016llx\n",
4890                "cr3:", save->cr3, "cr4:", save->cr4);
4891         pr_err("%-15s %016llx %-13s %016llx\n",
4892                "dr6:", save->dr6, "dr7:", save->dr7);
4893         pr_err("%-15s %016llx %-13s %016llx\n",
4894                "rip:", save->rip, "rflags:", save->rflags);
4895         pr_err("%-15s %016llx %-13s %016llx\n",
4896                "rsp:", save->rsp, "rax:", save->rax);
4897         pr_err("%-15s %016llx %-13s %016llx\n",
4898                "star:", save->star, "lstar:", save->lstar);
4899         pr_err("%-15s %016llx %-13s %016llx\n",
4900                "cstar:", save->cstar, "sfmask:", save->sfmask);
4901         pr_err("%-15s %016llx %-13s %016llx\n",
4902                "kernel_gs_base:", save->kernel_gs_base,
4903                "sysenter_cs:", save->sysenter_cs);
4904         pr_err("%-15s %016llx %-13s %016llx\n",
4905                "sysenter_esp:", save->sysenter_esp,
4906                "sysenter_eip:", save->sysenter_eip);
4907         pr_err("%-15s %016llx %-13s %016llx\n",
4908                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4909         pr_err("%-15s %016llx %-13s %016llx\n",
4910                "br_from:", save->br_from, "br_to:", save->br_to);
4911         pr_err("%-15s %016llx %-13s %016llx\n",
4912                "excp_from:", save->last_excp_from,
4913                "excp_to:", save->last_excp_to);
4914 }
4915
4916 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4917 {
4918         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4919
4920         *info1 = control->exit_info_1;
4921         *info2 = control->exit_info_2;
4922 }
4923
4924 static int handle_exit(struct kvm_vcpu *vcpu)
4925 {
4926         struct vcpu_svm *svm = to_svm(vcpu);
4927         struct kvm_run *kvm_run = vcpu->run;
4928         u32 exit_code = svm->vmcb->control.exit_code;
4929
4930         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4931
4932         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
4933                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4934         if (npt_enabled)
4935                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
4936
4937         if (unlikely(svm->nested.exit_required)) {
4938                 nested_svm_vmexit(svm);
4939                 svm->nested.exit_required = false;
4940
4941                 return 1;
4942         }
4943
4944         if (is_guest_mode(vcpu)) {
4945                 int vmexit;
4946
4947                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4948                                         svm->vmcb->control.exit_info_1,
4949                                         svm->vmcb->control.exit_info_2,
4950                                         svm->vmcb->control.exit_int_info,
4951                                         svm->vmcb->control.exit_int_info_err,
4952                                         KVM_ISA_SVM);
4953
4954                 vmexit = nested_svm_exit_special(svm);
4955
4956                 if (vmexit == NESTED_EXIT_CONTINUE)
4957                         vmexit = nested_svm_exit_handled(svm);
4958
4959                 if (vmexit == NESTED_EXIT_DONE)
4960                         return 1;
4961         }
4962
4963         svm_complete_interrupts(svm);
4964
4965         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4966                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4967                 kvm_run->fail_entry.hardware_entry_failure_reason
4968                         = svm->vmcb->control.exit_code;
4969                 dump_vmcb(vcpu);
4970                 return 0;
4971         }
4972
4973         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
4974             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
4975             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4976             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
4977                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
4978                        "exit_code 0x%x\n",
4979                        __func__, svm->vmcb->control.exit_int_info,
4980                        exit_code);
4981
4982         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
4983             || !svm_exit_handlers[exit_code]) {
4984                 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
4985                 dump_vmcb(vcpu);
4986                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4987                 vcpu->run->internal.suberror =
4988                         KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
4989                 vcpu->run->internal.ndata = 1;
4990                 vcpu->run->internal.data[0] = exit_code;
4991                 return 0;
4992         }
4993
4994         return svm_exit_handlers[exit_code](svm);
4995 }
4996
4997 static void reload_tss(struct kvm_vcpu *vcpu)
4998 {
4999         int cpu = raw_smp_processor_id();
5000
5001         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5002         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
5003         load_TR_desc();
5004 }
5005
5006 static void pre_sev_run(struct vcpu_svm *svm, int cpu)
5007 {
5008         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5009         int asid = sev_get_asid(svm->vcpu.kvm);
5010
5011         /* Assign the ASID allocated to this SEV guest */
5012         svm->vmcb->control.asid = asid;
5013
5014         /*
5015          * Flush guest TLB:
5016          *
5017          * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
5018          * 2) when this VMCB was executed on a different host CPU in a previous VMRUN.
5019          */
5020         if (sd->sev_vmcbs[asid] == svm->vmcb &&
5021             svm->last_cpu == cpu)
5022                 return;
5023
5024         svm->last_cpu = cpu;
5025         sd->sev_vmcbs[asid] = svm->vmcb;
5026         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5027         mark_dirty(svm->vmcb, VMCB_ASID);
5028 }
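
/*
 * Example of the flush conditions above: if VMCB A for ASID 5 last ran on
 * CPU 0 and VMCB B with the same ASID 5 is now scheduled on CPU 0, then
 * sd->sev_vmcbs[5] != B and TLB_CONTROL_FLUSH_ASID is requested before
 * VMRUN.  Likewise, if VMCB A itself migrates to CPU 1, svm->last_cpu !=
 * cpu forces a flush on the new CPU.
 */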
5029
5030 static void pre_svm_run(struct vcpu_svm *svm)
5031 {
5032         int cpu = raw_smp_processor_id();
5033
5034         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5035
5036         if (sev_guest(svm->vcpu.kvm))
5037                 return pre_sev_run(svm, cpu);
5038
5039         /* FIXME: handle wraparound of asid_generation */
5040         if (svm->asid_generation != sd->asid_generation)
5041                 new_asid(svm, sd);
5042 }
5043
5044 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
5045 {
5046         struct vcpu_svm *svm = to_svm(vcpu);
5047
5048         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
5049         vcpu->arch.hflags |= HF_NMI_MASK;
5050         set_intercept(svm, INTERCEPT_IRET);
5051         ++vcpu->stat.nmi_injections;
5052 }
5053
5054 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
5055 {
5056         struct vmcb_control_area *control;
5057
5058         /* The following fields are ignored when AVIC is enabled */
5059         control = &svm->vmcb->control;
5060         control->int_vector = irq;
5061         control->int_ctl &= ~V_INTR_PRIO_MASK;
5062         control->int_ctl |= V_IRQ_MASK |
5063                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
5064         mark_dirty(svm->vmcb, VMCB_INTR);
5065 }
5066
5067 static void svm_set_irq(struct kvm_vcpu *vcpu)
5068 {
5069         struct vcpu_svm *svm = to_svm(vcpu);
5070
5071         BUG_ON(!(gif_set(svm)));
5072
5073         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
5074         ++vcpu->stat.irq_injections;
5075
5076         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
5077                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
5078 }
5079
5080 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
5081 {
5082         return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
5083 }
5084
5085 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
5086 {
5087         struct vcpu_svm *svm = to_svm(vcpu);
5088
5089         if (svm_nested_virtualize_tpr(vcpu) ||
5090             kvm_vcpu_apicv_active(vcpu))
5091                 return;
5092
5093         clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5094
5095         if (irr == -1)
5096                 return;
5097
5098         if (tpr >= irr)
5099                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5100 }
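
/*
 * In effect, the CR8 write intercept is only re-armed when the current
 * task priority already masks the highest pending interrupt (tpr >= irr);
 * KVM is then notified as soon as the guest lowers TPR far enough for
 * that interrupt to become deliverable.  With AVIC or nested vTPR the
 * function returns early and the hardware handles TPR virtualization.
 */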
5101
5102 static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
5103 {
5104         return;
5105 }
5106
5107 static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
5108 {
5109         return avic && irqchip_split(vcpu->kvm);
5110 }
5111
5112 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
5113 {
5114 }
5115
5116 static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
5117 {
5118 }
5119
5120 /* Note: Currently only used by Hyper-V. */
5121 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
5122 {
5123         struct vcpu_svm *svm = to_svm(vcpu);
5124         struct vmcb *vmcb = svm->vmcb;
5125
5126         if (kvm_vcpu_apicv_active(vcpu))
5127                 vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
5128         else
5129                 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
5130         mark_dirty(vmcb, VMCB_AVIC);
5131 }
5132
5133 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
5134 {
5135         return;
5136 }
5137
5138 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
5139 {
5140         kvm_lapic_set_irr(vec, vcpu->arch.apic);
5141         smp_mb__after_atomic();
5142
5143         if (avic_vcpu_is_running(vcpu)) {
5144                 int cpuid = vcpu->cpu;
5145
5146                 if (cpuid != get_cpu())
5147                         wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
5148                 put_cpu();
5149         } else
5150                 kvm_vcpu_wake_up(vcpu);
5151 }
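
/*
 * Delivery sketch for the function above: the vector is first latched in
 * the vIRR of the vCPU's APIC backing page.  If the target vCPU is
 * currently running on another physical CPU, writing that CPU's APIC ID
 * to the AVIC doorbell MSR makes the hardware scan the backing page and
 * inject the interrupt; otherwise the vCPU is woken up so it picks up the
 * pending vIRR bit on its next VMRUN.
 */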
5152
5153 static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
5154 {
5155         return false;
5156 }
5157
5158 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5159 {
5160         unsigned long flags;
5161         struct amd_svm_iommu_ir *cur;
5162
5163         spin_lock_irqsave(&svm->ir_list_lock, flags);
5164         list_for_each_entry(cur, &svm->ir_list, node) {
5165                 if (cur->data != pi->ir_data)
5166                         continue;
5167                 list_del(&cur->node);
5168                 kfree(cur);
5169                 break;
5170         }
5171         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5172 }
5173
5174 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5175 {
5176         int ret = 0;
5177         unsigned long flags;
5178         struct amd_svm_iommu_ir *ir;
5179
5180         /**
5181          * In some cases, the existing irte is updated and re-set,
5182          * so we need to check here if it's already been added
5183          * to the ir_list.
5184          */
5185         if (pi->ir_data && (pi->prev_ga_tag != 0)) {
5186                 struct kvm *kvm = svm->vcpu.kvm;
5187                 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
5188                 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
5189                 struct vcpu_svm *prev_svm;
5190
5191                 if (!prev_vcpu) {
5192                         ret = -EINVAL;
5193                         goto out;
5194                 }
5195
5196                 prev_svm = to_svm(prev_vcpu);
5197                 svm_ir_list_del(prev_svm, pi);
5198         }
5199
5200         /**
5201          * Allocate a new amd_svm_iommu_ir entry, which will be
5202          * added to the per-vcpu ir_list.
5203          */
5204         ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
5205         if (!ir) {
5206                 ret = -ENOMEM;
5207                 goto out;
5208         }
5209         ir->data = pi->ir_data;
5210
5211         spin_lock_irqsave(&svm->ir_list_lock, flags);
5212         list_add(&ir->node, &svm->ir_list);
5213         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5214 out:
5215         return ret;
5216 }
5217
5218 /**
5219  * Note:
5220  * The HW cannot support posting multicast/broadcast
5221  * interrupts to a vCPU. So, we still use legacy interrupt
5222  * remapping for these kinds of interrupts.
5223  *
5224  * For lowest-priority interrupts, we only support
5225  * those with a single CPU as the destination, e.g. the user
5226  * configures the interrupts via /proc/irq or uses
5227  * irqbalance to make the interrupts single-CPU.
5228  */
5229 static int
5230 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
5231                  struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
5232 {
5233         struct kvm_lapic_irq irq;
5234         struct kvm_vcpu *vcpu = NULL;
5235
5236         kvm_set_msi_irq(kvm, e, &irq);
5237
5238         if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
5239             !kvm_irq_is_postable(&irq)) {
5240                 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
5241                          __func__, irq.vector);
5242                 return -1;
5243         }
5244
5245         pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
5246                  irq.vector);
5247         *svm = to_svm(vcpu);
5248         vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
5249         vcpu_info->vector = irq.vector;
5250
5251         return 0;
5252 }
5253
5254 /*
5255  * svm_update_pi_irte - set IRTE for Posted-Interrupts
5256  *
5257  * @kvm: kvm
5258  * @host_irq: host irq of the interrupt
5259  * @guest_irq: gsi of the interrupt
5260  * @set: set or unset PI
5261  * returns 0 on success, < 0 on failure
5262  */
5263 static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
5264                               uint32_t guest_irq, bool set)
5265 {
5266         struct kvm_kernel_irq_routing_entry *e;
5267         struct kvm_irq_routing_table *irq_rt;
5268         int idx, ret = -EINVAL;
5269
5270         if (!kvm_arch_has_assigned_device(kvm) ||
5271             !irq_remapping_cap(IRQ_POSTING_CAP))
5272                 return 0;
5273
5274         pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
5275                  __func__, host_irq, guest_irq, set);
5276
5277         idx = srcu_read_lock(&kvm->irq_srcu);
5278         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
5279         WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
5280
5281         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
5282                 struct vcpu_data vcpu_info;
5283                 struct vcpu_svm *svm = NULL;
5284
5285                 if (e->type != KVM_IRQ_ROUTING_MSI)
5286                         continue;
5287
5288                 /**
5289                  * Here, we set up legacy mode in the following cases:
5290                  * 1. When the interrupt cannot be targeted to a specific vcpu.
5291                  * 2. When unsetting the posted interrupt.
5292                  * 3. When APIC virtualization is disabled for the vcpu.
5293                  * 4. When the IRQ has an incompatible delivery mode (SMI, INIT, etc.).
5294                  */
5295                 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
5296                     kvm_vcpu_apicv_active(&svm->vcpu)) {
5297                         struct amd_iommu_pi_data pi;
5298
5299                         /* Try to enable guest_mode in IRTE */
5300                         pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
5301                                             AVIC_HPA_MASK);
5302                         pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
5303                                                      svm->vcpu.vcpu_id);
5304                         pi.is_guest_mode = true;
5305                         pi.vcpu_data = &vcpu_info;
5306                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5307
5308                         /**
5309                          * Here, we have successfully set up vcpu affinity in
5310                          * IOMMU guest mode. Now, we need to store the posted
5311                          * interrupt information in a per-vcpu ir_list so that
5312                          * we can reference it directly when we update vcpu
5313                          * scheduling information in the IOMMU irte.
5314                          */
5315                         if (!ret && pi.is_guest_mode)
5316                                 svm_ir_list_add(svm, &pi);
5317                 } else {
5318                         /* Use legacy mode in IRTE */
5319                         struct amd_iommu_pi_data pi;
5320
5321                         /**
5322                          * Here, pi is used to:
5323                          * - Tell IOMMU to use legacy mode for this interrupt.
5324                          * - Retrieve ga_tag of prior interrupt remapping data.
5325                          */
5326                         pi.is_guest_mode = false;
5327                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5328
5329                         /**
5330                          * Check if the posted interrupt was previously
5331                          * set up with guest_mode by checking if the ga_tag
5332                          * was cached. If so, we need to clean up the per-vcpu
5333                          * ir_list.
5334                          */
5335                         if (!ret && pi.prev_ga_tag) {
5336                                 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
5337                                 struct kvm_vcpu *vcpu;
5338
5339                                 vcpu = kvm_get_vcpu_by_id(kvm, id);
5340                                 if (vcpu)
5341                                         svm_ir_list_del(to_svm(vcpu), &pi);
5342                         }
5343                 }
5344
5345                 if (!ret && svm) {
5346                         trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
5347                                                  e->gsi, vcpu_info.vector,
5348                                                  vcpu_info.pi_desc_addr, set);
5349                 }
5350
5351                 if (ret < 0) {
5352                         pr_err("%s: failed to update PI IRTE\n", __func__);
5353                         goto out;
5354                 }
5355         }
5356
5357         ret = 0;
5358 out:
5359         srcu_read_unlock(&kvm->irq_srcu, idx);
5360         return ret;
5361 }
5362
5363 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
5364 {
5365         struct vcpu_svm *svm = to_svm(vcpu);
5366         struct vmcb *vmcb = svm->vmcb;
5367         int ret;
5368         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5369               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5370         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5371
5372         return ret;
5373 }
5374
5375 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5376 {
5377         struct vcpu_svm *svm = to_svm(vcpu);
5378
5379         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5380 }
5381
5382 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5383 {
5384         struct vcpu_svm *svm = to_svm(vcpu);
5385
5386         if (masked) {
5387                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
5388                 set_intercept(svm, INTERCEPT_IRET);
5389         } else {
5390                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
5391                 clr_intercept(svm, INTERCEPT_IRET);
5392         }
5393 }
5394
5395 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5396 {
5397         struct vcpu_svm *svm = to_svm(vcpu);
5398         struct vmcb *vmcb = svm->vmcb;
5399         int ret;
5400
5401         if (!gif_set(svm) ||
5402              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5403                 return 0;
5404
5405         ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
5406
5407         if (is_guest_mode(vcpu))
5408                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5409
5410         return ret;
5411 }
5412
5413 static void enable_irq_window(struct kvm_vcpu *vcpu)
5414 {
5415         struct vcpu_svm *svm = to_svm(vcpu);
5416
5417         if (kvm_vcpu_apicv_active(vcpu))
5418                 return;
5419
5420         /*
5421          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5422          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
5423          * get that intercept, this function will be called again though and
5424          * we'll get the vintr intercept. However, if the vGIF feature is
5425          * enabled, the STGI interception will not occur. Enable the irq
5426          * window under the assumption that the hardware will set the GIF.
5427          */
5428         if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
5429                 svm_set_vintr(svm);
5430                 svm_inject_irq(svm, 0x0);
5431         }
5432 }
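
/*
 * The interrupt window itself is requested by faking a pending virtual
 * interrupt: svm_set_vintr() enables the VINTR intercept and
 * svm_inject_irq(svm, 0x0) programs V_IRQ with vector 0 at the highest
 * priority (0xf), so the CPU exits with SVM_EXIT_VINTR as soon as the
 * guest can actually take an interrupt (see
 * interrupt_window_interception in the exit handler table).
 */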
5433
5434 static void enable_nmi_window(struct kvm_vcpu *vcpu)
5435 {
5436         struct vcpu_svm *svm = to_svm(vcpu);
5437
5438         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5439             == HF_NMI_MASK)
5440                 return; /* IRET will cause a vm exit */
5441
5442         if (!gif_set(svm)) {
5443                 if (vgif_enabled(svm))
5444                         set_intercept(svm, INTERCEPT_STGI);
5445                 return; /* STGI will cause a vm exit */
5446         }
5447
5448         if (svm->nested.exit_required)
5449                 return; /* we're not going to run the guest yet */
5450
5451         /*
5452          * Something prevents the NMI from being injected. Single step over the
5453          * possible problem (IRET or exception injection or interrupt shadow).
5454          */
5455         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
5456         svm->nmi_singlestep = true;
5457         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
5458 }
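
/*
 * The single-step trick above works by setting TF (with RF to avoid a
 * spurious debug exception on the first instruction): the guest executes
 * one instruction, the resulting #DB is intercepted, and KVM can then
 * re-evaluate NMI injection once the blocking condition (pending IRET,
 * exception injection or interrupt shadow) has passed.  See
 * disable_nmi_singlestep() and the db_interception handler.
 */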
5459
5460 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5461 {
5462         return 0;
5463 }
5464
5465 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5466 {
5467         return 0;
5468 }
5469
5470 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5471 {
5472         struct vcpu_svm *svm = to_svm(vcpu);
5473
5474         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5475                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5476         else
5477                 svm->asid_generation--;
5478 }
5479
5480 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
5481 {
5482         struct vcpu_svm *svm = to_svm(vcpu);
5483
5484         invlpga(gva, svm->vmcb->control.asid);
5485 }
5486
5487 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5488 {
5489 }
5490
5491 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5492 {
5493         struct vcpu_svm *svm = to_svm(vcpu);
5494
5495         if (svm_nested_virtualize_tpr(vcpu))
5496                 return;
5497
5498         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
5499                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
5500                 kvm_set_cr8(vcpu, cr8);
5501         }
5502 }
5503
5504 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5505 {
5506         struct vcpu_svm *svm = to_svm(vcpu);
5507         u64 cr8;
5508
5509         if (svm_nested_virtualize_tpr(vcpu) ||
5510             kvm_vcpu_apicv_active(vcpu))
5511                 return;
5512
5513         cr8 = kvm_get_cr8(vcpu);
5514         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5515         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5516 }
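
/*
 * These two helpers keep the lapic TPR and the VMCB V_TPR field (the low
 * four bits of int_ctl) in sync: sync_lapic_to_cr8() copies the current
 * CR8 value into V_TPR before VMRUN, while sync_cr8_to_lapic() propagates
 * a guest-updated V_TPR back to the lapic when CR8 writes are not being
 * intercepted.
 */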
5517
5518 static void svm_complete_interrupts(struct vcpu_svm *svm)
5519 {
5520         u8 vector;
5521         int type;
5522         u32 exitintinfo = svm->vmcb->control.exit_int_info;
5523         unsigned int3_injected = svm->int3_injected;
5524
5525         svm->int3_injected = 0;
5526
5527         /*
5528          * If we've made progress since setting HF_IRET_MASK, we've
5529          * executed an IRET and can allow NMI injection.
5530          */
5531         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5532             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
5533                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
5534                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5535         }
5536
5537         svm->vcpu.arch.nmi_injected = false;
5538         kvm_clear_exception_queue(&svm->vcpu);
5539         kvm_clear_interrupt_queue(&svm->vcpu);
5540
5541         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5542                 return;
5543
5544         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5545
5546         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5547         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5548
5549         switch (type) {
5550         case SVM_EXITINTINFO_TYPE_NMI:
5551                 svm->vcpu.arch.nmi_injected = true;
5552                 break;
5553         case SVM_EXITINTINFO_TYPE_EXEPT:
5554                 /*
5555                  * In case of software exceptions, do not reinject the vector,
5556                  * but re-execute the instruction instead. Rewind RIP first
5557                  * if we emulated INT3 before.
5558                  */
5559                 if (kvm_exception_is_soft(vector)) {
5560                         if (vector == BP_VECTOR && int3_injected &&
5561                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5562                                 kvm_rip_write(&svm->vcpu,
5563                                               kvm_rip_read(&svm->vcpu) -
5564                                               int3_injected);
5565                         break;
5566                 }
5567                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5568                         u32 err = svm->vmcb->control.exit_int_info_err;
5569                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
5570
5571                 } else
5572                         kvm_requeue_exception(&svm->vcpu, vector);
5573                 break;
5574         case SVM_EXITINTINFO_TYPE_INTR:
5575                 kvm_queue_interrupt(&svm->vcpu, vector, false);
5576                 break;
5577         default:
5578                 break;
5579         }
5580 }
5581
5582 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5583 {
5584         struct vcpu_svm *svm = to_svm(vcpu);
5585         struct vmcb_control_area *control = &svm->vmcb->control;
5586
5587         control->exit_int_info = control->event_inj;
5588         control->exit_int_info_err = control->event_inj_err;
5589         control->event_inj = 0;
5590         svm_complete_interrupts(svm);
5591 }
5592
5593 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5594 {
5595         struct vcpu_svm *svm = to_svm(vcpu);
5596
5597         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5598         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5599         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5600
5601         /*
5602          * A vmexit emulation is required before the vcpu can be executed
5603          * again.
5604          */
5605         if (unlikely(svm->nested.exit_required))
5606                 return;
5607
5608         /*
5609          * Disable singlestep if we're injecting an interrupt/exception.
5610          * We don't want our modified rflags to be pushed on the stack where
5611          * we might not be able to easily reset them if we disabled NMI
5612          * singlestep later.
5613          */
5614         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5615                 /*
5616                  * Event injection happens before external interrupts cause a
5617                  * vmexit and interrupts are disabled here, so smp_send_reschedule
5618                  * is enough to force an immediate vmexit.
5619                  */
5620                 disable_nmi_singlestep(svm);
5621                 smp_send_reschedule(vcpu->cpu);
5622         }
5623
5624         pre_svm_run(svm);
5625
5626         sync_lapic_to_cr8(vcpu);
5627
5628         svm->vmcb->save.cr2 = vcpu->arch.cr2;
5629
5630         clgi();
5631         kvm_load_guest_xcr0(vcpu);
5632
5633         if (lapic_in_kernel(vcpu) &&
5634                 vcpu->arch.apic->lapic_timer.timer_advance_ns)
5635                 kvm_wait_lapic_expire(vcpu);
5636
5637         /*
5638          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
5639          * it's non-zero. Since vmentry is serialising on affected CPUs, there
5640          * is no need to worry about the conditional branch over the wrmsr
5641          * being speculatively taken.
5642          */
5643         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
5644
5645         local_irq_enable();
5646
5647         asm volatile (
5648                 "push %%" _ASM_BP "; \n\t"
5649                 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5650                 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5651                 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5652                 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5653                 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5654                 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
5655 #ifdef CONFIG_X86_64
5656                 "mov %c[r8](%[svm]),  %%r8  \n\t"
5657                 "mov %c[r9](%[svm]),  %%r9  \n\t"
5658                 "mov %c[r10](%[svm]), %%r10 \n\t"
5659                 "mov %c[r11](%[svm]), %%r11 \n\t"
5660                 "mov %c[r12](%[svm]), %%r12 \n\t"
5661                 "mov %c[r13](%[svm]), %%r13 \n\t"
5662                 "mov %c[r14](%[svm]), %%r14 \n\t"
5663                 "mov %c[r15](%[svm]), %%r15 \n\t"
5664 #endif
5665
5666                 /* Enter guest mode */
5667                 "push %%" _ASM_AX " \n\t"
5668                 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
5669                 __ex("vmload %%" _ASM_AX) "\n\t"
5670                 __ex("vmrun %%" _ASM_AX) "\n\t"
5671                 __ex("vmsave %%" _ASM_AX) "\n\t"
5672                 "pop %%" _ASM_AX " \n\t"
5673
5674                 /* Save guest registers, load host registers */
5675                 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5676                 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5677                 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5678                 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5679                 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5680                 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
5681 #ifdef CONFIG_X86_64
5682                 "mov %%r8,  %c[r8](%[svm]) \n\t"
5683                 "mov %%r9,  %c[r9](%[svm]) \n\t"
5684                 "mov %%r10, %c[r10](%[svm]) \n\t"
5685                 "mov %%r11, %c[r11](%[svm]) \n\t"
5686                 "mov %%r12, %c[r12](%[svm]) \n\t"
5687                 "mov %%r13, %c[r13](%[svm]) \n\t"
5688                 "mov %%r14, %c[r14](%[svm]) \n\t"
5689                 "mov %%r15, %c[r15](%[svm]) \n\t"
5690                 /*
5691                  * Clear host registers marked as clobbered to prevent
5692                  * speculative use.
5693                  */
5694                 "xor %%r8d, %%r8d \n\t"
5695                 "xor %%r9d, %%r9d \n\t"
5696                 "xor %%r10d, %%r10d \n\t"
5697                 "xor %%r11d, %%r11d \n\t"
5698                 "xor %%r12d, %%r12d \n\t"
5699                 "xor %%r13d, %%r13d \n\t"
5700                 "xor %%r14d, %%r14d \n\t"
5701                 "xor %%r15d, %%r15d \n\t"
5702 #endif
5703                 "xor %%ebx, %%ebx \n\t"
5704                 "xor %%ecx, %%ecx \n\t"
5705                 "xor %%edx, %%edx \n\t"
5706                 "xor %%esi, %%esi \n\t"
5707                 "xor %%edi, %%edi \n\t"
5708                 "pop %%" _ASM_BP
5709                 :
5710                 : [svm]"a"(svm),
5711                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
5712                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5713                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5714                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5715                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5716                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5717                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
5718 #ifdef CONFIG_X86_64
5719                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5720                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5721                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5722                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5723                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5724                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5725                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5726                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
5727 #endif
5728                 : "cc", "memory"
5729 #ifdef CONFIG_X86_64
5730                 , "rbx", "rcx", "rdx", "rsi", "rdi"
5731                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
5732 #else
5733                 , "ebx", "ecx", "edx", "esi", "edi"
5734 #endif
5735                 );
5736
5737         /* Eliminate branch target predictions from guest mode */
5738         vmexit_fill_RSB();
5739
5740 #ifdef CONFIG_X86_64
5741         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5742 #else
5743         loadsegment(fs, svm->host.fs);
5744 #ifndef CONFIG_X86_32_LAZY_GS
5745         loadsegment(gs, svm->host.gs);
5746 #endif
5747 #endif
5748
5749         /*
5750          * We do not use IBRS in the kernel. If this vCPU has used the
5751          * SPEC_CTRL MSR it may have left it on; save the value and
5752          * turn it off. This is much more efficient than blindly adding
5753          * it to the atomic save/restore list. Especially as the former
5754          * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
5755          *
5756          * For non-nested case:
5757          * If the L01 MSR bitmap does not intercept the MSR, then we need to
5758          * save it.
5759          *
5760          * For nested case:
5761          * If the L02 MSR bitmap does not intercept the MSR, then we need to
5762          * save it.
5763          */
5764         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
5765                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
5766
5767         reload_tss(vcpu);
5768
5769         local_irq_disable();
5770
5771         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
5772
5773         vcpu->arch.cr2 = svm->vmcb->save.cr2;
5774         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5775         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5776         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5777
5778         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5779                 kvm_before_interrupt(&svm->vcpu);
5780
5781         kvm_put_guest_xcr0(vcpu);
5782         stgi();
5783
5784         /* Any pending NMI will happen here */
5785
5786         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5787                 kvm_after_interrupt(&svm->vcpu);
5788
5789         sync_cr8_to_lapic(vcpu);
5790
5791         svm->next_rip = 0;
5792
5793         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5794
5795         /* if exit due to PF check for async PF */
5796         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
5797                 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
5798
5799         if (npt_enabled) {
5800                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5801                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5802         }
5803
5804         /*
5805          * We need to handle MC intercepts here before the vcpu has a chance to
5806          * change the physical cpu.
5807          */
5808         if (unlikely(svm->vmcb->control.exit_code ==
5809                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
5810                 svm_handle_mce(svm);
5811
5812         mark_all_clean(svm->vmcb);
5813 }
5814 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
5815
5816 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5817 {
5818         struct vcpu_svm *svm = to_svm(vcpu);
5819
5820         svm->vmcb->save.cr3 = __sme_set(root);
5821         mark_dirty(svm->vmcb, VMCB_CR);
5822 }
5823
5824 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5825 {
5826         struct vcpu_svm *svm = to_svm(vcpu);
5827
5828         svm->vmcb->control.nested_cr3 = __sme_set(root);
5829         mark_dirty(svm->vmcb, VMCB_NPT);
5830
5831         /* Also sync guest cr3 here in case we live migrate */
5832         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
5833         mark_dirty(svm->vmcb, VMCB_CR);
5834 }
5835
5836 static int is_disabled(void)
5837 {
5838         u64 vm_cr;
5839
5840         rdmsrl(MSR_VM_CR, vm_cr);
5841         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5842                 return 1;
5843
5844         return 0;
5845 }
5846
5847 static void
5848 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5849 {
5850         /*
5851          * Patch in the VMMCALL instruction:
5852          */
5853         hypercall[0] = 0x0f;
5854         hypercall[1] = 0x01;
5855         hypercall[2] = 0xd9;
5856 }
5857
5858 static int __init svm_check_processor_compat(void)
5859 {
5860         return 0;
5861 }
5862
5863 static bool svm_cpu_has_accelerated_tpr(void)
5864 {
5865         return false;
5866 }
5867
5868 static bool svm_has_emulated_msr(int index)
5869 {
5870         switch (index) {
5871         case MSR_IA32_MCG_EXT_CTL:
5872         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
5873                 return false;
5874         default:
5875                 break;
5876         }
5877
5878         return true;
5879 }
5880
5881 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5882 {
5883         return 0;
5884 }
5885
5886 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5887 {
5888         struct vcpu_svm *svm = to_svm(vcpu);
5889
5890         /* Update nrips enabled cache */
5891         svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
5892
5893         if (!kvm_vcpu_apicv_active(vcpu))
5894                 return;
5895
5896         guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
5897 }
5898
5899 #define F(x) bit(X86_FEATURE_##x)
5900
5901 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5902 {
5903         switch (func) {
5904         case 0x1:
5905                 if (avic)
5906                         entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5907                 break;
5908         case 0x80000001:
5909                 if (nested)
5910                         entry->ecx |= (1 << 2); /* Set SVM bit */
5911                 break;
5912         case 0x80000008:
5913                 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5914                      boot_cpu_has(X86_FEATURE_AMD_SSBD))
5915                         entry->ebx |= F(VIRT_SSBD);
5916                 break;
5917         case 0x8000000A:
5918                 entry->eax = 1; /* SVM revision 1 */
5919                 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5920                                    ASID emulation to nested SVM */
5921                 entry->ecx = 0; /* Reserved */
5922                 entry->edx = 0; /* By default do not support any
5923                                    additional features */
5924
5925                 /* Support next_rip if host supports it */
5926                 if (boot_cpu_has(X86_FEATURE_NRIPS))
5927                         entry->edx |= F(NRIPS);
5928
5929                 /* Support NPT for the guest if enabled */
5930                 if (npt_enabled)
5931                         entry->edx |= F(NPT);
5932
5933                 break;
5934         case 0x8000001F:
5935                 /* Support memory encryption cpuid if host supports it */
5936                 if (boot_cpu_has(X86_FEATURE_SEV))
5937                         cpuid(0x8000001f, &entry->eax, &entry->ebx,
5938                                 &entry->ecx, &entry->edx);
5939
5940         }
5941 }
5942
5943 static int svm_get_lpage_level(void)
5944 {
5945         return PT_PDPE_LEVEL;
5946 }
5947
5948 static bool svm_rdtscp_supported(void)
5949 {
5950         return boot_cpu_has(X86_FEATURE_RDTSCP);
5951 }
5952
5953 static bool svm_invpcid_supported(void)
5954 {
5955         return false;
5956 }
5957
5958 static bool svm_mpx_supported(void)
5959 {
5960         return false;
5961 }
5962
5963 static bool svm_xsaves_supported(void)
5964 {
5965         return false;
5966 }
5967
5968 static bool svm_umip_emulated(void)
5969 {
5970         return false;
5971 }
5972
5973 static bool svm_pt_supported(void)
5974 {
5975         return false;
5976 }
5977
5978 static bool svm_has_wbinvd_exit(void)
5979 {
5980         return true;
5981 }
5982
5983 #define PRE_EX(exit)  { .exit_code = (exit), \
5984                         .stage = X86_ICPT_PRE_EXCEPT, }
5985 #define POST_EX(exit) { .exit_code = (exit), \
5986                         .stage = X86_ICPT_POST_EXCEPT, }
5987 #define POST_MEM(exit) { .exit_code = (exit), \
5988                         .stage = X86_ICPT_POST_MEMACCESS, }
5989
5990 static const struct __x86_intercept {
5991         u32 exit_code;
5992         enum x86_intercept_stage stage;
5993 } x86_intercept_map[] = {
5994         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
5995         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
5996         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
5997         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
5998         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
5999         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
6000         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
6001         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
6002         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
6003         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
6004         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
6005         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
6006         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
6007         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
6008         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
6009         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
6010         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
6011         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
6012         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
6013         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
6014         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
6015         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
6016         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
6017         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
6018         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
6019         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
6020         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
6021         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
6022         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
6023         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
6024         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
6025         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
6026         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
6027         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
6028         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
6029         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
6030         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
6031         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
6032         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
6033         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
6034         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
6035         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
6036         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
6037         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
6038         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
6039         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
6040         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
6041 };
6042
6043 #undef PRE_EX
6044 #undef POST_EX
6045 #undef POST_MEM
6046
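/*
 * Emulator callback: decide whether the instruction currently being emulated
 * would have been intercepted by the L1 (nested) hypervisor.  The emulator
 * intercept id is translated to an SVM exit code via x86_intercept_map,
 * exit_info_1/2 are synthesized where the exit code alone is not enough
 * (CR/DR number, MSR read vs. write, IOIO details), and
 * nested_svm_exit_handled() then consults vmcb12's intercept bits.  Returns
 * X86EMUL_INTERCEPTED if a nested #VMEXIT should be taken, X86EMUL_CONTINUE
 * otherwise.
 */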
6047 static int svm_check_intercept(struct kvm_vcpu *vcpu,
6048                                struct x86_instruction_info *info,
6049                                enum x86_intercept_stage stage)
6050 {
6051         struct vcpu_svm *svm = to_svm(vcpu);
6052         int vmexit, ret = X86EMUL_CONTINUE;
6053         struct __x86_intercept icpt_info;
6054         struct vmcb *vmcb = svm->vmcb;
6055
6056         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
6057                 goto out;
6058
6059         icpt_info = x86_intercept_map[info->intercept];
6060
6061         if (stage != icpt_info.stage)
6062                 goto out;
6063
6064         switch (icpt_info.exit_code) {
6065         case SVM_EXIT_READ_CR0:
6066                 if (info->intercept == x86_intercept_cr_read)
6067                         icpt_info.exit_code += info->modrm_reg;
6068                 break;
6069         case SVM_EXIT_WRITE_CR0: {
6070                 unsigned long cr0, val;
6071                 u64 intercept;
6072
6073                 if (info->intercept == x86_intercept_cr_write)
6074                         icpt_info.exit_code += info->modrm_reg;
6075
6076                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
6077                     info->intercept == x86_intercept_clts)
6078                         break;
6079
6080                 intercept = svm->nested.intercept;
6081
6082                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
6083                         break;
6084
6085                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
6086                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
6087
6088                 if (info->intercept == x86_intercept_lmsw) {
6089                         cr0 &= 0xfUL;
6090                         val &= 0xfUL;
6091                         /* lmsw can't clear PE - catch this here */
6092                         if (cr0 & X86_CR0_PE)
6093                                 val |= X86_CR0_PE;
6094                 }
6095
6096                 if (cr0 ^ val)
6097                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
6098
6099                 break;
6100         }
6101         case SVM_EXIT_READ_DR0:
6102         case SVM_EXIT_WRITE_DR0:
6103                 icpt_info.exit_code += info->modrm_reg;
6104                 break;
6105         case SVM_EXIT_MSR:
6106                 if (info->intercept == x86_intercept_wrmsr)
6107                         vmcb->control.exit_info_1 = 1;
6108                 else
6109                         vmcb->control.exit_info_1 = 0;
6110                 break;
6111         case SVM_EXIT_PAUSE:
6112                 /*
6113          * The emulator reports this intercept for NOP, but PAUSE is
6114          * REP NOP, so only treat it as PAUSE when the REP prefix is present.
6115                  */
6116                 if (info->rep_prefix != REPE_PREFIX)
6117                         goto out;
6118                 break;
6119         case SVM_EXIT_IOIO: {
6120                 u64 exit_info;
6121                 u32 bytes;
6122
6123                 if (info->intercept == x86_intercept_in ||
6124                     info->intercept == x86_intercept_ins) {
6125                         exit_info = ((info->src_val & 0xffff) << 16) |
6126                                 SVM_IOIO_TYPE_MASK;
6127                         bytes = info->dst_bytes;
6128                 } else {
6129                         exit_info = (info->dst_val & 0xffff) << 16;
6130                         bytes = info->src_bytes;
6131                 }
6132
6133                 if (info->intercept == x86_intercept_outs ||
6134                     info->intercept == x86_intercept_ins)
6135                         exit_info |= SVM_IOIO_STR_MASK;
6136
6137                 if (info->rep_prefix)
6138                         exit_info |= SVM_IOIO_REP_MASK;
6139
6140                 bytes = min(bytes, 4u);
6141
6142                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
6143
6144                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
6145
6146                 vmcb->control.exit_info_1 = exit_info;
6147                 vmcb->control.exit_info_2 = info->next_rip;
6148
6149                 break;
6150         }
6151         default:
6152                 break;
6153         }
6154
6155         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
6156         if (static_cpu_has(X86_FEATURE_NRIPS))
6157                 vmcb->control.next_rip  = info->next_rip;
6158         vmcb->control.exit_code = icpt_info.exit_code;
6159         vmexit = nested_svm_exit_handled(svm);
6160
6161         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
6162                                            : X86EMUL_CONTINUE;
6163
6164 out:
6165         return ret;
6166 }
6167
6168 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6169 {
6170
6171 }
6172
6173 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
6174 {
6175         if (pause_filter_thresh)
6176                 shrink_ple_window(vcpu);
6177 }
6178
6179 static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
6180 {
6181         if (avic_handle_apic_id_update(vcpu) != 0)
6182                 return;
6183         avic_handle_dfr_update(vcpu);
6184         avic_handle_ldr_update(vcpu);
6185 }
6186
6187 static void svm_setup_mce(struct kvm_vcpu *vcpu)
6188 {
6189         /* [63:9] are reserved. */
6190         vcpu->arch.mcg_cap &= 0x1ff;
6191 }
6192
6193 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
6194 {
6195         struct vcpu_svm *svm = to_svm(vcpu);
6196
6197         /* Per APM Vol.2 15.22.2 "Response to SMI" */
6198         if (!gif_set(svm))
6199                 return 0;
6200
6201         if (is_guest_mode(&svm->vcpu) &&
6202             svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
6203                 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
6204                 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
6205                 svm->nested.exit_required = true;
6206                 return 0;
6207         }
6208
6209         return 1;
6210 }
6211
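/*
 * On SMM entry, record in the SMM state-save area that this vCPU was in SVM
 * guest mode (offset FED8h) together with the vmcb12 physical address
 * (offset FEE0h), then emulate a #VMEXIT so that SMM runs on the L1 state.
 * The saved fields are consumed by svm_pre_leave_smm() on RSM.
 */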
6212 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
6213 {
6214         struct vcpu_svm *svm = to_svm(vcpu);
6215         int ret;
6216
6217         if (is_guest_mode(vcpu)) {
6218                 /* FED8h - SVM Guest */
6219                 put_smstate(u64, smstate, 0x7ed8, 1);
6220                 /* FEE0h - SVM Guest VMCB Physical Address */
6221                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
6222
6223                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
6224                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
6225                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
6226
6227                 ret = nested_svm_vmexit(svm);
6228                 if (ret)
6229                         return ret;
6230         }
6231         return 0;
6232 }
6233
6234 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
6235 {
6236         struct vcpu_svm *svm = to_svm(vcpu);
6237         struct vmcb *nested_vmcb;
6238         struct kvm_host_map map;
6239         u64 guest;
6240         u64 vmcb;
6241
6242         guest = GET_SMSTATE(u64, smstate, 0x7ed8);
6243         vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
6244
6245         if (guest) {
6246                 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
6247                         return 1;
6248                 nested_vmcb = map.hva;
6249                 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
6250         }
6251         return 0;
6252 }
6253
6254 static int enable_smi_window(struct kvm_vcpu *vcpu)
6255 {
6256         struct vcpu_svm *svm = to_svm(vcpu);
6257
6258         if (!gif_set(svm)) {
6259                 if (vgif_enabled(svm))
6260                         set_intercept(svm, INTERCEPT_STGI);
6261                 /* STGI will cause a vm exit */
6262                 return 1;
6263         }
6264         return 0;
6265 }
6266
6267 static int sev_asid_new(void)
6268 {
6269         int pos;
6270
6271         /*
6272          * An SEV-enabled guest must use an ASID from min_sev_asid to max_sev_asid.
6273          */
6274         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
6275         if (pos >= max_sev_asid)
6276                 return -EBUSY;
6277
6278         set_bit(pos, sev_asid_bitmap);
6279         return pos + 1;
6280 }
6281
6282 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6283 {
6284         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6285         int asid, ret;
6286
6287         ret = -EBUSY;
6288         if (unlikely(sev->active))
6289                 return ret;
6290
6291         asid = sev_asid_new();
6292         if (asid < 0)
6293                 return ret;
6294
6295         ret = sev_platform_init(&argp->error);
6296         if (ret)
6297                 goto e_free;
6298
6299         sev->active = true;
6300         sev->asid = asid;
6301         INIT_LIST_HEAD(&sev->regions_list);
6302
6303         return 0;
6304
6305 e_free:
6306         __sev_asid_free(asid);
6307         return ret;
6308 }
6309
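/*
 * Bind the firmware handle of this guest to its SEV ASID.  Caches on all
 * CPUs and the SEV firmware's data fabric are flushed first so that no
 * stale data tagged with this ASID remains, then the ACTIVATE command
 * associates the handle with the ASID.
 */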
6310 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
6311 {
6312         struct sev_data_activate *data;
6313         int asid = sev_get_asid(kvm);
6314         int ret;
6315
6316         wbinvd_on_all_cpus();
6317
6318         ret = sev_guest_df_flush(error);
6319         if (ret)
6320                 return ret;
6321
6322         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6323         if (!data)
6324                 return -ENOMEM;
6325
6326         /* activate ASID on the given handle */
6327         data->handle = handle;
6328         data->asid   = asid;
6329         ret = sev_guest_activate(data, error);
6330         kfree(data);
6331
6332         return ret;
6333 }
6334
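/*
 * Issue an SEV firmware command on behalf of userspace: look up the
 * /dev/sev file from the fd that userspace supplied and forward the command
 * to the PSP driver via sev_issue_cmd_external_user(), which validates that
 * the file really is the SEV device before issuing the command.
 */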
6335 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
6336 {
6337         struct fd f;
6338         int ret;
6339
6340         f = fdget(fd);
6341         if (!f.file)
6342                 return -EBADF;
6343
6344         ret = sev_issue_cmd_external_user(f.file, id, data, error);
6345
6346         fdput(f);
6347         return ret;
6348 }
6349
6350 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
6351 {
6352         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6353
6354         return __sev_issue_cmd(sev->fd, id, data, error);
6355 }
6356
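/*
 * KVM_SEV_LAUNCH_START: create the memory encryption context for this
 * guest.  The optional DH certificate and session blobs are copied in from
 * userspace, SEV_CMD_LAUNCH_START is issued to the firmware, and the
 * returned handle is bound to the guest's ASID and copied back to userspace.
 */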
6357 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
6358 {
6359         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6360         struct sev_data_launch_start *start;
6361         struct kvm_sev_launch_start params;
6362         void *dh_blob, *session_blob;
6363         int *error = &argp->error;
6364         int ret;
6365
6366         if (!sev_guest(kvm))
6367                 return -ENOTTY;
6368
6369         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6370                 return -EFAULT;
6371
6372         start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
6373         if (!start)
6374                 return -ENOMEM;
6375
6376         dh_blob = NULL;
6377         if (params.dh_uaddr) {
6378                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
6379                 if (IS_ERR(dh_blob)) {
6380                         ret = PTR_ERR(dh_blob);
6381                         goto e_free;
6382                 }
6383
6384                 start->dh_cert_address = __sme_set(__pa(dh_blob));
6385                 start->dh_cert_len = params.dh_len;
6386         }
6387
6388         session_blob = NULL;
6389         if (params.session_uaddr) {
6390                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
6391                 if (IS_ERR(session_blob)) {
6392                         ret = PTR_ERR(session_blob);
6393                         goto e_free_dh;
6394                 }
6395
6396                 start->session_address = __sme_set(__pa(session_blob));
6397                 start->session_len = params.session_len;
6398         }
6399
6400         start->handle = params.handle;
6401         start->policy = params.policy;
6402
6403         /* create memory encryption context */
6404         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
6405         if (ret)
6406                 goto e_free_session;
6407
6408         /* Bind ASID to this guest */
6409         ret = sev_bind_asid(kvm, start->handle, error);
6410         if (ret)
6411                 goto e_free_session;
6412
6413         /* return handle to userspace */
6414         params.handle = start->handle;
6415         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
6416                 sev_unbind_asid(kvm, start->handle);
6417                 ret = -EFAULT;
6418                 goto e_free_session;
6419         }
6420
6421         sev->handle = start->handle;
6422         sev->fd = argp->sev_fd;
6423
6424 e_free_session:
6425         kfree(session_blob);
6426 e_free_dh:
6427         kfree(dh_blob);
6428 e_free:
6429         kfree(start);
6430         return ret;
6431 }
6432
6433 static unsigned long get_num_contig_pages(unsigned long idx,
6434                                 struct page **inpages, unsigned long npages)
6435 {
6436         unsigned long paddr, next_paddr;
6437         unsigned long i = idx + 1, pages = 1;
6438
6439         /* find the number of contiguous pages starting from idx */
6440         paddr = __sme_page_pa(inpages[idx]);
6441         while (i < npages) {
6442                 next_paddr = __sme_page_pa(inpages[i++]);
6443                 if ((paddr + PAGE_SIZE) == next_paddr) {
6444                         pages++;
6445                         paddr = next_paddr;
6446                         continue;
6447                 }
6448                 break;
6449         }
6450
6451         return pages;
6452 }
6453
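/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt a range of guest memory in place.
 * The user pages are pinned, caches are flushed (the data may still be
 * cached with C=0), and LAUNCH_UPDATE_DATA is issued once per physically
 * contiguous run of pinned pages.
 */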
6454 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6455 {
6456         unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
6457         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6458         struct kvm_sev_launch_update_data params;
6459         struct sev_data_launch_update_data *data;
6460         struct page **inpages;
6461         int ret;
6462
6463         if (!sev_guest(kvm))
6464                 return -ENOTTY;
6465
6466         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6467                 return -EFAULT;
6468
6469         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6470         if (!data)
6471                 return -ENOMEM;
6472
6473         vaddr = params.uaddr;
6474         size = params.len;
6475         vaddr_end = vaddr + size;
6476
6477         /* Lock the user memory. */
6478         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6479         if (!inpages) {
6480                 ret = -ENOMEM;
6481                 goto e_free;
6482         }
6483
6484         /*
6485          * The LAUNCH_UPDATE command will perform in-place encryption of the
6486          * memory content (i.e. it will write the same memory region with C=1).
6487          * It's possible that the cache may contain the data with C=0, i.e.,
6488          * unencrypted, so invalidate it first.
6489          */
6490         sev_clflush_pages(inpages, npages);
6491
6492         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6493                 int offset, len;
6494
6495                 /*
6496                  * If the user buffer is not page-aligned, calculate the offset
6497                  * within the page.
6498                  */
6499                 offset = vaddr & (PAGE_SIZE - 1);
6500
6501                 /* Calculate the number of pages that can be encrypted in one go. */
6502                 pages = get_num_contig_pages(i, inpages, npages);
6503
6504                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6505
6506                 data->handle = sev->handle;
6507                 data->len = len;
6508                 data->address = __sme_page_pa(inpages[i]) + offset;
6509                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6510                 if (ret)
6511                         goto e_unpin;
6512
6513                 size -= len;
6514                 next_vaddr = vaddr + len;
6515         }
6516
6517 e_unpin:
6518         /* The memory content was updated, mark the pages dirty */
6519         for (i = 0; i < npages; i++) {
6520                 set_page_dirty_lock(inpages[i]);
6521                 mark_page_accessed(inpages[i]);
6522         }
6523         /* unlock the user pages */
6524         sev_unpin_memory(kvm, inpages, npages);
6525 e_free:
6526         kfree(data);
6527         return ret;
6528 }
6529
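/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the measurement of the launched guest.
 * When params.len is zero only the required blob length is reported back;
 * otherwise the measurement is written into a kernel bounce buffer and
 * copied out to params.uaddr.
 */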
6530 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6531 {
6532         void __user *measure = (void __user *)(uintptr_t)argp->data;
6533         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6534         struct sev_data_launch_measure *data;
6535         struct kvm_sev_launch_measure params;
6536         void __user *p = NULL;
6537         void *blob = NULL;
6538         int ret;
6539
6540         if (!sev_guest(kvm))
6541                 return -ENOTTY;
6542
6543         if (copy_from_user(&params, measure, sizeof(params)))
6544                 return -EFAULT;
6545
6546         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6547         if (!data)
6548                 return -ENOMEM;
6549
6550         /* User wants to query the blob length */
6551         if (!params.len)
6552                 goto cmd;
6553
6554         p = (void __user *)(uintptr_t)params.uaddr;
6555         if (p) {
6556                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6557                         ret = -EINVAL;
6558                         goto e_free;
6559                 }
6560
6561                 ret = -ENOMEM;
6562                 blob = kmalloc(params.len, GFP_KERNEL);
6563                 if (!blob)
6564                         goto e_free;
6565
6566                 data->address = __psp_pa(blob);
6567                 data->len = params.len;
6568         }
6569
6570 cmd:
6571         data->handle = sev->handle;
6572         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6573
6574         /*
6575          * If we only queried the blob length, the FW has responded with the expected data (length in data->len).
6576          */
6577         if (!params.len)
6578                 goto done;
6579
6580         if (ret)
6581                 goto e_free_blob;
6582
6583         if (blob) {
6584                 if (copy_to_user(p, blob, params.len))
6585                         ret = -EFAULT;
6586         }
6587
6588 done:
6589         params.len = data->len;
6590         if (copy_to_user(measure, &params, sizeof(params)))
6591                 ret = -EFAULT;
6592 e_free_blob:
6593         kfree(blob);
6594 e_free:
6595         kfree(data);
6596         return ret;
6597 }
6598
6599 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6600 {
6601         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6602         struct sev_data_launch_finish *data;
6603         int ret;
6604
6605         if (!sev_guest(kvm))
6606                 return -ENOTTY;
6607
6608         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6609         if (!data)
6610                 return -ENOMEM;
6611
6612         data->handle = sev->handle;
6613         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6614
6615         kfree(data);
6616         return ret;
6617 }
6618
6619 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6620 {
6621         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6622         struct kvm_sev_guest_status params;
6623         struct sev_data_guest_status *data;
6624         int ret;
6625
6626         if (!sev_guest(kvm))
6627                 return -ENOTTY;
6628
6629         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6630         if (!data)
6631                 return -ENOMEM;
6632
6633         data->handle = sev->handle;
6634         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6635         if (ret)
6636                 goto e_free;
6637
6638         params.policy = data->policy;
6639         params.state = data->state;
6640         params.handle = data->handle;
6641
6642         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6643                 ret = -EFAULT;
6644 e_free:
6645         kfree(data);
6646         return ret;
6647 }
6648
6649 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6650                                unsigned long dst, int size,
6651                                int *error, bool enc)
6652 {
6653         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6654         struct sev_data_dbg *data;
6655         int ret;
6656
6657         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6658         if (!data)
6659                 return -ENOMEM;
6660
6661         data->handle = sev->handle;
6662         data->dst_addr = dst;
6663         data->src_addr = src;
6664         data->len = size;
6665
6666         ret = sev_issue_cmd(kvm,
6667                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6668                             data, error);
6669         kfree(data);
6670         return ret;
6671 }
6672
6673 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6674                              unsigned long dst_paddr, int sz, int *err)
6675 {
6676         int offset;
6677
6678         /*
6679          * It's safe to read more than we were asked for; the caller should ensure
6680          * that the destination has enough space.
6681          */
6682         src_paddr = round_down(src_paddr, 16);
6683         offset = src_paddr & 15;
6684         sz = round_up(sz + offset, 16);
6685
6686         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6687 }
6688
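/*
 * Decrypt guest memory into a userspace buffer.  The DBG firmware commands
 * operate on 16-byte aligned addresses and lengths, so unaligned requests
 * are decrypted into a scratch page first and only the requested bytes are
 * copied out to userspace.
 */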
6689 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6690                                   unsigned long __user dst_uaddr,
6691                                   unsigned long dst_paddr,
6692                                   int size, int *err)
6693 {
6694         struct page *tpage = NULL;
6695         int ret, offset;
6696
6697         /* If the inputs are not 16-byte aligned then use an intermediate buffer */
6698         if (!IS_ALIGNED(dst_paddr, 16) ||
6699             !IS_ALIGNED(paddr,     16) ||
6700             !IS_ALIGNED(size,      16)) {
6701                 tpage = (void *)alloc_page(GFP_KERNEL);
6702                 if (!tpage)
6703                         return -ENOMEM;
6704
6705                 dst_paddr = __sme_page_pa(tpage);
6706         }
6707
6708         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6709         if (ret)
6710                 goto e_free;
6711
6712         if (tpage) {
6713                 offset = paddr & 15;
6714                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6715                                  page_address(tpage) + offset, size))
6716                         ret = -EFAULT;
6717         }
6718
6719 e_free:
6720         if (tpage)
6721                 __free_page(tpage);
6722
6723         return ret;
6724 }
6725
6726 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6727                                   unsigned long __user vaddr,
6728                                   unsigned long dst_paddr,
6729                                   unsigned long __user dst_vaddr,
6730                                   int size, int *error)
6731 {
6732         struct page *src_tpage = NULL;
6733         struct page *dst_tpage = NULL;
6734         int ret, len = size;
6735
6736         /* If the source buffer is not 16-byte aligned then use an intermediate buffer */
6737         if (!IS_ALIGNED(vaddr, 16)) {
6738                 src_tpage = alloc_page(GFP_KERNEL);
6739                 if (!src_tpage)
6740                         return -ENOMEM;
6741
6742                 if (copy_from_user(page_address(src_tpage),
6743                                 (void __user *)(uintptr_t)vaddr, size)) {
6744                         __free_page(src_tpage);
6745                         return -EFAULT;
6746                 }
6747
6748                 paddr = __sme_page_pa(src_tpage);
6749         }
6750
6751         /*
6752          *  If the destination buffer or length is not 16-byte aligned, do a read-modify-write:
6753          *   - decrypt the destination into an intermediate buffer
6754          *   - copy the source buffer into the intermediate buffer
6755          *   - use the intermediate buffer as the source buffer
6756          */
6757         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6758                 int dst_offset;
6759
6760                 dst_tpage = alloc_page(GFP_KERNEL);
6761                 if (!dst_tpage) {
6762                         ret = -ENOMEM;
6763                         goto e_free;
6764                 }
6765
6766                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6767                                         __sme_page_pa(dst_tpage), size, error);
6768                 if (ret)
6769                         goto e_free;
6770
6771                 /*
6772          *  If the source is a kernel buffer then use memcpy(), otherwise
6773          *  copy_from_user().
6774                  */
6775                 dst_offset = dst_paddr & 15;
6776
6777                 if (src_tpage)
6778                         memcpy(page_address(dst_tpage) + dst_offset,
6779                                page_address(src_tpage), size);
6780                 else {
6781                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
6782                                            (void __user *)(uintptr_t)vaddr, size)) {
6783                                 ret = -EFAULT;
6784                                 goto e_free;
6785                         }
6786                 }
6787
6788                 paddr = __sme_page_pa(dst_tpage);
6789                 dst_paddr = round_down(dst_paddr, 16);
6790                 len = round_up(size, 16);
6791         }
6792
6793         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6794
6795 e_free:
6796         if (src_tpage)
6797                 __free_page(src_tpage);
6798         if (dst_tpage)
6799                 __free_page(dst_tpage);
6800         return ret;
6801 }
6802
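/*
 * KVM_SEV_DBG_{DE,EN}CRYPT: walk the source range one page at a time,
 * pinning the current source and destination pages, flushing their cache
 * lines, and handing page-bounded chunks to the alignment-aware helpers
 * above.
 */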
6803 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6804 {
6805         unsigned long vaddr, vaddr_end, next_vaddr;
6806         unsigned long dst_vaddr;
6807         struct page **src_p, **dst_p;
6808         struct kvm_sev_dbg debug;
6809         unsigned long n;
6810         unsigned int size;
6811         int ret;
6812
6813         if (!sev_guest(kvm))
6814                 return -ENOTTY;
6815
6816         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6817                 return -EFAULT;
6818
6819         if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
6820                 return -EINVAL;
6821         if (!debug.dst_uaddr)
6822                 return -EINVAL;
6823
6824         vaddr = debug.src_uaddr;
6825         size = debug.len;
6826         vaddr_end = vaddr + size;
6827         dst_vaddr = debug.dst_uaddr;
6828
6829         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6830                 int len, s_off, d_off;
6831
6832                 /* lock userspace source and destination page */
6833                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6834                 if (!src_p)
6835                         return -EFAULT;
6836
6837                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6838                 if (!dst_p) {
6839                         sev_unpin_memory(kvm, src_p, n);
6840                         return -EFAULT;
6841                 }
6842
6843                 /*
6844                  * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6845                  * memory content (i.e. it will write the same memory region with C=1).
6846                  * It's possible that the cache may contain the data with C=0, i.e.,
6847                  * unencrypted, so invalidate it first.
6848                  */
6849                 sev_clflush_pages(src_p, 1);
6850                 sev_clflush_pages(dst_p, 1);
6851
6852                 /*
6853                  * Since the user buffers may not be page-aligned, calculate the
6854                  * offsets within the page.
6855                  */
6856                 s_off = vaddr & ~PAGE_MASK;
6857                 d_off = dst_vaddr & ~PAGE_MASK;
6858                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6859
6860                 if (dec)
6861                         ret = __sev_dbg_decrypt_user(kvm,
6862                                                      __sme_page_pa(src_p[0]) + s_off,
6863                                                      dst_vaddr,
6864                                                      __sme_page_pa(dst_p[0]) + d_off,
6865                                                      len, &argp->error);
6866                 else
6867                         ret = __sev_dbg_encrypt_user(kvm,
6868                                                      __sme_page_pa(src_p[0]) + s_off,
6869                                                      vaddr,
6870                                                      __sme_page_pa(dst_p[0]) + d_off,
6871                                                      dst_vaddr,
6872                                                      len, &argp->error);
6873
6874                 sev_unpin_memory(kvm, src_p, n);
6875                 sev_unpin_memory(kvm, dst_p, n);
6876
6877                 if (ret)
6878                         goto err;
6879
6880                 next_vaddr = vaddr + len;
6881                 dst_vaddr = dst_vaddr + len;
6882                 size -= len;
6883         }
6884 err:
6885         return ret;
6886 }
6887
6888 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6889 {
6890         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6891         struct sev_data_launch_secret *data;
6892         struct kvm_sev_launch_secret params;
6893         struct page **pages;
6894         void *blob, *hdr;
6895         unsigned long n;
6896         int ret, offset;
6897
6898         if (!sev_guest(kvm))
6899                 return -ENOTTY;
6900
6901         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6902                 return -EFAULT;
6903
6904         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6905         if (!pages)
6906                 return -ENOMEM;
6907
6908         /*
6909          * The secret must be copied into a contiguous memory region, so verify
6910          * that the userspace memory pages are contiguous before issuing the command.
6911          */
6912         if (get_num_contig_pages(0, pages, n) != n) {
6913                 ret = -EINVAL;
6914                 goto e_unpin_memory;
6915         }
6916
6917         ret = -ENOMEM;
6918         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6919         if (!data)
6920                 goto e_unpin_memory;
6921
6922         offset = params.guest_uaddr & (PAGE_SIZE - 1);
6923         data->guest_address = __sme_page_pa(pages[0]) + offset;
6924         data->guest_len = params.guest_len;
6925
6926         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6927         if (IS_ERR(blob)) {
6928                 ret = PTR_ERR(blob);
6929                 goto e_free;
6930         }
6931
6932         data->trans_address = __psp_pa(blob);
6933         data->trans_len = params.trans_len;
6934
6935         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6936         if (IS_ERR(hdr)) {
6937                 ret = PTR_ERR(hdr);
6938                 goto e_free_blob;
6939         }
6940         data->hdr_address = __psp_pa(hdr);
6941         data->hdr_len = params.hdr_len;
6942
6943         data->handle = sev->handle;
6944         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6945
6946         kfree(hdr);
6947
6948 e_free_blob:
6949         kfree(blob);
6950 e_free:
6951         kfree(data);
6952 e_unpin_memory:
6953         sev_unpin_memory(kvm, pages, n);
6954         return ret;
6955 }
6956
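/*
 * VM-ioctl entry point for KVM_MEMORY_ENCRYPT_OP: the struct kvm_sev_cmd is
 * copied in, dispatched on ->id under kvm->lock, and copied back so that
 * ->error (and any output written by the handler) reaches userspace.
 *
 * Illustrative userspace sketch, not part of this file; sev_fd is assumed
 * to be an open fd of /dev/sev, vm_fd the VM's KVM fd, and error handling
 * plus command-specific payloads are omitted:
 *
 *	struct kvm_sev_cmd cmd = { .id = KVM_SEV_INIT, .sev_fd = sev_fd };
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */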
6957 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6958 {
6959         struct kvm_sev_cmd sev_cmd;
6960         int r;
6961
6962         if (!svm_sev_enabled())
6963                 return -ENOTTY;
6964
6965         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6966                 return -EFAULT;
6967
6968         mutex_lock(&kvm->lock);
6969
6970         switch (sev_cmd.id) {
6971         case KVM_SEV_INIT:
6972                 r = sev_guest_init(kvm, &sev_cmd);
6973                 break;
6974         case KVM_SEV_LAUNCH_START:
6975                 r = sev_launch_start(kvm, &sev_cmd);
6976                 break;
6977         case KVM_SEV_LAUNCH_UPDATE_DATA:
6978                 r = sev_launch_update_data(kvm, &sev_cmd);
6979                 break;
6980         case KVM_SEV_LAUNCH_MEASURE:
6981                 r = sev_launch_measure(kvm, &sev_cmd);
6982                 break;
6983         case KVM_SEV_LAUNCH_FINISH:
6984                 r = sev_launch_finish(kvm, &sev_cmd);
6985                 break;
6986         case KVM_SEV_GUEST_STATUS:
6987                 r = sev_guest_status(kvm, &sev_cmd);
6988                 break;
6989         case KVM_SEV_DBG_DECRYPT:
6990                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6991                 break;
6992         case KVM_SEV_DBG_ENCRYPT:
6993                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6994                 break;
6995         case KVM_SEV_LAUNCH_SECRET:
6996                 r = sev_launch_secret(kvm, &sev_cmd);
6997                 break;
6998         default:
6999                 r = -EINVAL;
7000                 goto out;
7001         }
7002
7003         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
7004                 r = -EFAULT;
7005
7006 out:
7007         mutex_unlock(&kvm->lock);
7008         return r;
7009 }
7010
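/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin the given userspace range and record
 * it on sev->regions_list so that the pages stay resident for the lifetime
 * of the guest (or until unregistered).  Caches are flushed because the
 * guest may change the C-bit for this range.
 */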
7011 static int svm_register_enc_region(struct kvm *kvm,
7012                                    struct kvm_enc_region *range)
7013 {
7014         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7015         struct enc_region *region;
7016         int ret = 0;
7017
7018         if (!sev_guest(kvm))
7019                 return -ENOTTY;
7020
7021         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
7022                 return -EINVAL;
7023
7024         region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
7025         if (!region)
7026                 return -ENOMEM;
7027
7028         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
7029         if (!region->pages) {
7030                 ret = -ENOMEM;
7031                 goto e_free;
7032         }
7033
7034         /*
7035          * The guest may change the memory encryption attribute from C=0 -> C=1
7036          * or vice versa for this memory range. Lets make sure caches are
7037          * or vice versa for this memory range. Let's make sure caches are
7038          * flushed to ensure that guest data gets written into memory with the
7039          * correct C-bit.
7040         sev_clflush_pages(region->pages, region->npages);
7041
7042         region->uaddr = range->addr;
7043         region->size = range->size;
7044
7045         mutex_lock(&kvm->lock);
7046         list_add_tail(&region->list, &sev->regions_list);
7047         mutex_unlock(&kvm->lock);
7048
7049         return ret;
7050
7051 e_free:
7052         kfree(region);
7053         return ret;
7054 }
7055
7056 static struct enc_region *
7057 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
7058 {
7059         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7060         struct list_head *head = &sev->regions_list;
7061         struct enc_region *i;
7062
7063         list_for_each_entry(i, head, list) {
7064                 if (i->uaddr == range->addr &&
7065                     i->size == range->size)
7066                         return i;
7067         }
7068
7069         return NULL;
7070 }
7071
7072
7073 static int svm_unregister_enc_region(struct kvm *kvm,
7074                                      struct kvm_enc_region *range)
7075 {
7076         struct enc_region *region;
7077         int ret;
7078
7079         mutex_lock(&kvm->lock);
7080
7081         if (!sev_guest(kvm)) {
7082                 ret = -ENOTTY;
7083                 goto failed;
7084         }
7085
7086         region = find_enc_region(kvm, range);
7087         if (!region) {
7088                 ret = -EINVAL;
7089                 goto failed;
7090         }
7091
7092         __unregister_enc_region_locked(kvm, region);
7093
7094         mutex_unlock(&kvm->lock);
7095         return 0;
7096
7097 failed:
7098         mutex_unlock(&kvm->lock);
7099         return ret;
7100 }
7101
7102 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
7103                                    uint16_t *vmcs_version)
7104 {
7105         /* Intel-only feature */
7106         return -ENODEV;
7107 }
7108
7109 static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
7110 {
7111         unsigned long cr4 = kvm_read_cr4(vcpu);
7112         bool smep = cr4 & X86_CR4_SMEP;
7113         bool smap = cr4 & X86_CR4_SMAP;
7114         bool is_user = svm_get_cpl(vcpu) == 3;
7115
7116         /*
7117          * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
7118          *
7119          * Erratum:
7120          * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1, it
7121          * is possible that CPU microcode implementing DecodeAssist will fail to
7122          * read the bytes of the instruction which caused the #NPF. In this case,
7123          * the GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
7124          * return 0 instead of the correct guest instruction bytes.
7125          *
7126          * This happens because the CPU microcode reading the instruction bytes
7127          * uses a special opcode which attempts to read data using CPL=0
7128          * privileges. The microcode reads CS:RIP and if it hits an SMAP
7129          * fault, it gives up and returns no instruction bytes.
7130          *
7131          * Detection:
7132          * We reach here when the CPU supports DecodeAssist, raised #NPF and
7133          * returned 0 in the GuestIntrBytes field of the VMCB.
7134          * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
7135          * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered
7136          * when vCPU CPL==3 (because otherwise the guest would have triggered
7137          * an SMEP fault instead of #NPF).
7138          * Otherwise (vCPU CR4.SMEP=0), the erratum can be triggered at any vCPU CPL.
7139          * As most guests enable SMAP if they have also enabled SMEP, use the above
7140          * logic to minimize false positives when detecting the erratum
7141          * while still preserving semantic correctness in all cases.
7142          *
7143          * Workaround:
7144          * To determine what instruction the guest was executing, the hypervisor
7145          * will have to decode the instruction at the instruction pointer.
7146          *
7147          * In a non-SEV guest, the hypervisor is able to read guest memory
7148          * and decode the instruction at RIP when insn_len is zero,
7149          * so we return true to indicate that decoding is possible.
7150          *
7151          * But in an SEV guest, guest memory is encrypted with the
7152          * guest-specific key and the hypervisor cannot decode the
7153          * instruction at RIP, so we cannot work around the erratum.  Let's
7154          * print an error and request that the guest be killed.
7155          */
7156         if (smap && (!smep || is_user)) {
7157                 if (!sev_guest(vcpu->kvm))
7158                         return true;
7159
7160                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
7161                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7162         }
7163
7164         return false;
7165 }
7166
7167 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
7168 {
7169         struct vcpu_svm *svm = to_svm(vcpu);
7170
7171         /*
7172          * TODO: The last condition latches INIT signals on the vCPU when the
7173          * vCPU is in guest mode and vmcb12 defines an intercept on INIT.
7174          * To properly emulate the INIT intercept, SVM should implement
7175          * kvm_x86_ops->check_nested_events() and call nested_svm_vmexit()
7176          * there if an INIT signal is pending.
7177          */
7178         return !gif_set(svm) ||
7179                    (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
7180 }
7181
7182 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7183         .cpu_has_kvm_support = has_svm,
7184         .disabled_by_bios = is_disabled,
7185         .hardware_setup = svm_hardware_setup,
7186         .hardware_unsetup = svm_hardware_unsetup,
7187         .check_processor_compatibility = svm_check_processor_compat,
7188         .hardware_enable = svm_hardware_enable,
7189         .hardware_disable = svm_hardware_disable,
7190         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
7191         .has_emulated_msr = svm_has_emulated_msr,
7192
7193         .vcpu_create = svm_create_vcpu,
7194         .vcpu_free = svm_free_vcpu,
7195         .vcpu_reset = svm_vcpu_reset,
7196
7197         .vm_alloc = svm_vm_alloc,
7198         .vm_free = svm_vm_free,
7199         .vm_init = avic_vm_init,
7200         .vm_destroy = svm_vm_destroy,
7201
7202         .prepare_guest_switch = svm_prepare_guest_switch,
7203         .vcpu_load = svm_vcpu_load,
7204         .vcpu_put = svm_vcpu_put,
7205         .vcpu_blocking = svm_vcpu_blocking,
7206         .vcpu_unblocking = svm_vcpu_unblocking,
7207
7208         .update_bp_intercept = update_bp_intercept,
7209         .get_msr_feature = svm_get_msr_feature,
7210         .get_msr = svm_get_msr,
7211         .set_msr = svm_set_msr,
7212         .get_segment_base = svm_get_segment_base,
7213         .get_segment = svm_get_segment,
7214         .set_segment = svm_set_segment,
7215         .get_cpl = svm_get_cpl,
7216         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
7217         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
7218         .decache_cr3 = svm_decache_cr3,
7219         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
7220         .set_cr0 = svm_set_cr0,
7221         .set_cr3 = svm_set_cr3,
7222         .set_cr4 = svm_set_cr4,
7223         .set_efer = svm_set_efer,
7224         .get_idt = svm_get_idt,
7225         .set_idt = svm_set_idt,
7226         .get_gdt = svm_get_gdt,
7227         .set_gdt = svm_set_gdt,
7228         .get_dr6 = svm_get_dr6,
7229         .set_dr6 = svm_set_dr6,
7230         .set_dr7 = svm_set_dr7,
7231         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
7232         .cache_reg = svm_cache_reg,
7233         .get_rflags = svm_get_rflags,
7234         .set_rflags = svm_set_rflags,
7235
7236         .tlb_flush = svm_flush_tlb,
7237         .tlb_flush_gva = svm_flush_tlb_gva,
7238
7239         .run = svm_vcpu_run,
7240         .handle_exit = handle_exit,
7241         .skip_emulated_instruction = skip_emulated_instruction,
7242         .set_interrupt_shadow = svm_set_interrupt_shadow,
7243         .get_interrupt_shadow = svm_get_interrupt_shadow,
7244         .patch_hypercall = svm_patch_hypercall,
7245         .set_irq = svm_set_irq,
7246         .set_nmi = svm_inject_nmi,
7247         .queue_exception = svm_queue_exception,
7248         .cancel_injection = svm_cancel_injection,
7249         .interrupt_allowed = svm_interrupt_allowed,
7250         .nmi_allowed = svm_nmi_allowed,
7251         .get_nmi_mask = svm_get_nmi_mask,
7252         .set_nmi_mask = svm_set_nmi_mask,
7253         .enable_nmi_window = enable_nmi_window,
7254         .enable_irq_window = enable_irq_window,
7255         .update_cr8_intercept = update_cr8_intercept,
7256         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
7257         .get_enable_apicv = svm_get_enable_apicv,
7258         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
7259         .load_eoi_exitmap = svm_load_eoi_exitmap,
7260         .hwapic_irr_update = svm_hwapic_irr_update,
7261         .hwapic_isr_update = svm_hwapic_isr_update,
7262         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
7263         .apicv_post_state_restore = avic_post_state_restore,
7264
7265         .set_tss_addr = svm_set_tss_addr,
7266         .set_identity_map_addr = svm_set_identity_map_addr,
7267         .get_tdp_level = get_npt_level,
7268         .get_mt_mask = svm_get_mt_mask,
7269
7270         .get_exit_info = svm_get_exit_info,
7271
7272         .get_lpage_level = svm_get_lpage_level,
7273
7274         .cpuid_update = svm_cpuid_update,
7275
7276         .rdtscp_supported = svm_rdtscp_supported,
7277         .invpcid_supported = svm_invpcid_supported,
7278         .mpx_supported = svm_mpx_supported,
7279         .xsaves_supported = svm_xsaves_supported,
7280         .umip_emulated = svm_umip_emulated,
7281         .pt_supported = svm_pt_supported,
7282
7283         .set_supported_cpuid = svm_set_supported_cpuid,
7284
7285         .has_wbinvd_exit = svm_has_wbinvd_exit,
7286
7287         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7288         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7289
7290         .set_tdp_cr3 = set_tdp_cr3,
7291
7292         .check_intercept = svm_check_intercept,
7293         .handle_exit_irqoff = svm_handle_exit_irqoff,
7294
7295         .request_immediate_exit = __kvm_request_immediate_exit,
7296
7297         .sched_in = svm_sched_in,
7298
7299         .pmu_ops = &amd_pmu_ops,
7300         .deliver_posted_interrupt = svm_deliver_avic_intr,
7301         .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
7302         .update_pi_irte = svm_update_pi_irte,
7303         .setup_mce = svm_setup_mce,
7304
7305         .smi_allowed = svm_smi_allowed,
7306         .pre_enter_smm = svm_pre_enter_smm,
7307         .pre_leave_smm = svm_pre_leave_smm,
7308         .enable_smi_window = enable_smi_window,
7309
7310         .mem_enc_op = svm_mem_enc_op,
7311         .mem_enc_reg_region = svm_register_enc_region,
7312         .mem_enc_unreg_region = svm_unregister_enc_region,
7313
7314         .nested_enable_evmcs = nested_enable_evmcs,
7315         .nested_get_evmcs_version = NULL,
7316
7317         .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7318
7319         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
7320 };
7321
7322 static int __init svm_init(void)
7323 {
7324         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
7325                         __alignof__(struct vcpu_svm), THIS_MODULE);
7326 }
7327
7328 static void __exit svm_exit(void)
7329 {
7330         kvm_exit();
7331 }
7332
7333 module_init(svm_init)
7334 module_exit(svm_exit)