KVM: SVM: Add support for SEV LAUNCH_SECRET command
[linux-block.git] / arch / x86 / kvm / svm.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
						(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when update the vcpu affinity. This avoids the need to scan for
	 * IRTE and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* which host CPU was used for running this vcpu */
	unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID	0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

static inline bool svm_sev_enabled(void)
{
	return max_sev_asid;
}

static inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->active;
}

static inline int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->asid;
}

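/*
 * VMCB clean-bit helpers: the "clean" field advertises which cached VMCB
 * state areas the CPU may reuse on the next VMRUN.  mark_dirty() clears a
 * bit to force a reload of that area; VMCB_ALWAYS_DIRTY_MASK covers the
 * fields (int_ctl/TPR and CR2) that are rewritten before every VMRUN.
 */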
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

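/*
 * While the vcpu is in guest (nested) mode, the VMCB that is actually run
 * must intercept everything that either the host (state saved in hsave) or
 * the nested hypervisor asked for, so the effective intercept masks are the
 * union of both.  The set_*/clr_* helpers below edit the host VMCB and then
 * rerun this merge.
 */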
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

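/*
 * The MSR permission bitmap uses two bits per MSR (read intercept, write
 * intercept) and is split into three 2048-byte ranges, one per entry in
 * msrpm_ranges[], each covering MSRS_IN_RANGE (8192) MSRs.  This helper
 * translates an MSR number into a u32-granular offset into the bitmap,
 * or MSR_INVALID if the MSR is not covered by any range.
 */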
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

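/*
 * Per-CPU enable path: refuses to proceed if EFER.SVME is already set
 * (another hypervisor owns SVM), seeds the local ASID allocator so that
 * ASIDs up to max_sev_asid stay reserved for SEV guests, then sets
 * EFER.SVME and points MSR_VM_HSAVE_PA at this CPU's host save area.
 */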
static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	r = -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto err_1;

	if (svm_sev_enabled()) {
		r = -ENOMEM;
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto err_1;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

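/*
 * Each MSR occupies two adjacent bits in the permission bitmap: the even
 * bit intercepts reads, the odd bit intercepts writes.  A set bit means
 * "intercept", so passing read/write == 1 clears the corresponding bit
 * and lets the guest access the MSR directly.
 */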
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map VM_ID to a struct kvm_arch,
 * when handling AMD IOMMU GALOG notification to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

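/*
 * SEV setup: CPUID 0x8000001F reports the number of SEV ASIDs in ECX and
 * the minimum ASID to use for SEV guests in EDX.  A zero ASID count (or a
 * failed PLATFORM_STATUS query below) leaves SEV support disabled.
 */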
static __init int sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!max_sev_asid)
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				sizeof(unsigned long), GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state, if we failed to query
	 * the PLATFORM status then either PSP firmware does not support SEV
	 * feature or SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

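/*
 * Build the initial VMCB for a vcpu: default CR/DR/exception/instruction
 * intercepts, real-mode reset segment state, and the optional NPT, AVIC,
 * virtual VMLOAD/VMSAVE, virtual GIF and SEV enables, depending on which
 * features were kept enabled during hardware setup.
 */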
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_HLT);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);

	if (!kvm_mwait_in_guest()) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		control->pause_filter_count = 3000;
		set_intercept(svm, INTERCEPT_PAUSE);
	}

	if (kvm_vcpu_apicv_active(&svm->vcpu))
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	if (vgif) {
		clr_intercept(svm, INTERCEPT_STGI);
		clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(svm->vcpu.kvm))
		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);

}

static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
				       unsigned int index)
{
	u64 *avic_physical_id_table;
	struct kvm_arch *vm_data = &vcpu->kvm->arch;

	if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return NULL;

	avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);

	return &avic_physical_id_table[index];
}

/**
 * Note:
 * AVIC hardware walks the nested page table to check permissions,
 * but does not use the SPA address specified in the leaf page
 * table entry since it uses  address in the AVIC_BACKING_PAGE pointer
 * field of the VMCB. Therefore, we set up the
 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
 */
static int avic_init_access_page(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (kvm->arch.apic_access_page_done)
		return 0;

	ret = x86_set_memory_region(kvm,
				    APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				    APIC_DEFAULT_PHYS_BASE,
				    PAGE_SIZE);
	if (ret)
		return ret;

	kvm->arch.apic_access_page_done = true;
	return 0;
}

static int avic_init_backing_page(struct kvm_vcpu *vcpu)
{
	int ret;
	u64 *entry, new_entry;
	int id = vcpu->vcpu_id;
	struct vcpu_svm *svm = to_svm(vcpu);

	ret = avic_init_access_page(vcpu);
	if (ret)
		return ret;

	if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
		return -EINVAL;

	if (!svm->vcpu.arch.apic->regs)
		return -EINVAL;

	svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);

	/* Setting AVIC backing page address in the phy APIC ID table */
	entry = avic_get_physical_id_entry(vcpu, id);
	if (!entry)
		return -EINVAL;

	new_entry = READ_ONCE(*entry);
	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
	WRITE_ONCE(*entry, new_entry);

	svm->avic_physical_id_cache = entry;

	return 0;
}

static void __sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	pos = asid - 1;
	clear_bit(pos, sev_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}
}

static void sev_asid_free(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	__sev_asid_free(sev->asid);
}

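/*
 * Releasing a firmware handle requires DEACTIVATE followed by DECOMMISSION.
 * In between, guest-tagged cache contents are flushed (WBINVD on all CPUs
 * plus a firmware DF_FLUSH) so the ASID can be reused safely.
 */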
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;
	sev_guest_deactivate(data, NULL);

	wbinvd_on_all_cpus();
	sev_guest_df_flush(NULL);
	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}

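/*
 * Pin a range of guest userspace memory so the PSP firmware can operate on
 * it: the page count is charged against RLIMIT_MEMLOCK, the page-pointer
 * array comes from kmalloc or vmalloc depending on its size, and the pages
 * are pinned with get_user_pages_fast().  Callers release the pages with
 * sev_unpin_memory().
 */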
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;
	unsigned long npages, npinned, size;
	unsigned long locked, lock_limit;
	struct page **pages;
	int first, last;

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return NULL;
	}

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = vmalloc(size);
	else
		pages = kmalloc(size, GFP_KERNEL);

	if (!pages)
		return NULL;

	/* Pin the user virtual address. */
	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		release_pages(pages, npinned);

	kvfree(pages);
	return NULL;
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	release_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (npages == 0 || pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}

static void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	if (!sev_guest(kvm))
		return;

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(kvm);
}

static void avic_vm_destroy(struct kvm *kvm)
{
	unsigned long flags;
	struct kvm_arch *vm_data = &kvm->arch;

	if (!avic)
		return;

	if (vm_data->avic_logical_id_table_page)
		__free_page(vm_data->avic_logical_id_table_page);
	if (vm_data->avic_physical_id_table_page)
		__free_page(vm_data->avic_physical_id_table_page);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_del(&vm_data->hnode);
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
}

static void svm_vm_destroy(struct kvm *kvm)
{
	avic_vm_destroy(kvm);
	sev_vm_destroy(kvm);
}

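/*
 * Allocate the per-VM AVIC structures: one page each for the physical and
 * logical APIC ID tables, plus a 24-bit AVIC VM ID.  Once the ID counter
 * has wrapped, candidate IDs are checked against svm_vm_data_hash so two
 * live VMs never share an ID; the same hash is what avic_ga_log_notifier()
 * uses to find the VM for a GA-log entry.
 */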
44a95dae
SS
1687static int avic_vm_init(struct kvm *kvm)
1688{
5881f737 1689 unsigned long flags;
3f0d4db7 1690 int err = -ENOMEM;
44a95dae
SS
1691 struct kvm_arch *vm_data = &kvm->arch;
1692 struct page *p_page;
1693 struct page *l_page;
3f0d4db7
DV
1694 struct kvm_arch *ka;
1695 u32 vm_id;
44a95dae
SS
1696
1697 if (!avic)
1698 return 0;
1699
1700 /* Allocating physical APIC ID table (4KB) */
1701 p_page = alloc_page(GFP_KERNEL);
1702 if (!p_page)
1703 goto free_avic;
1704
1705 vm_data->avic_physical_id_table_page = p_page;
1706 clear_page(page_address(p_page));
1707
1708 /* Allocating logical APIC ID table (4KB) */
1709 l_page = alloc_page(GFP_KERNEL);
1710 if (!l_page)
1711 goto free_avic;
1712
1713 vm_data->avic_logical_id_table_page = l_page;
1714 clear_page(page_address(l_page));
1715
5881f737 1716 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
3f0d4db7
DV
1717 again:
1718 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1719 if (vm_id == 0) { /* id is 1-based, zero is not okay */
1720 next_vm_id_wrapped = 1;
1721 goto again;
1722 }
1723 /* Is it still in use? Only possible if wrapped at least once */
1724 if (next_vm_id_wrapped) {
1725 hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
1726 struct kvm *k2 = container_of(ka, struct kvm, arch);
1727 struct kvm_arch *vd2 = &k2->arch;
1728 if (vd2->avic_vm_id == vm_id)
1729 goto again;
1730 }
1731 }
1732 vm_data->avic_vm_id = vm_id;
5881f737
SS
1733 hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
1734 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1735
44a95dae
SS
1736 return 0;
1737
1738free_avic:
1739 avic_vm_destroy(kvm);
1740 return err;
6aa8b732
AK
1741}
1742
411b44ba
SS
1743static inline int
1744avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
8221c137 1745{
411b44ba
SS
1746 int ret = 0;
1747 unsigned long flags;
1748 struct amd_svm_iommu_ir *ir;
8221c137
SS
1749 struct vcpu_svm *svm = to_svm(vcpu);
1750
411b44ba
SS
1751 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1752 return 0;
8221c137 1753
411b44ba
SS
1754 /*
1755 * Here, we go through the per-vcpu ir_list to update all existing
1756 * interrupt remapping table entry targeting this vcpu.
1757 */
1758 spin_lock_irqsave(&svm->ir_list_lock, flags);
8221c137 1759
411b44ba
SS
1760 if (list_empty(&svm->ir_list))
1761 goto out;
8221c137 1762
411b44ba
SS
1763 list_for_each_entry(ir, &svm->ir_list, node) {
1764 ret = amd_iommu_update_ga(cpu, r, ir->data);
1765 if (ret)
1766 break;
1767 }
1768out:
1769 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1770 return ret;
8221c137
SS
1771}
1772
1773static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1774{
1775 u64 entry;
1776 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
7d669f50 1777 int h_physical_id = kvm_cpu_get_apicid(cpu);
8221c137
SS
1778 struct vcpu_svm *svm = to_svm(vcpu);
1779
1780 if (!kvm_vcpu_apicv_active(vcpu))
1781 return;
1782
1783 if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1784 return;
1785
1786 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1787 WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1788
1789 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1790 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1791
1792 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1793 if (svm->avic_is_running)
1794 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1795
1796 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
411b44ba
SS
1797 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
1798 svm->avic_is_running);
8221c137
SS
1799}
1800
1801static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1802{
1803 u64 entry;
1804 struct vcpu_svm *svm = to_svm(vcpu);
1805
1806 if (!kvm_vcpu_apicv_active(vcpu))
1807 return;
1808
1809 entry = READ_ONCE(*(svm->avic_physical_id_cache));
411b44ba
SS
1810 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
1811 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1812
8221c137
SS
1813 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1814 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
6aa8b732
AK
1815}
1816
411b44ba
SS
1817/**
1818 * This function is called during VCPU halt/unhalt.
1819 */
1820static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1821{
1822 struct vcpu_svm *svm = to_svm(vcpu);
1823
1824 svm->avic_is_running = is_run;
1825 if (is_run)
1826 avic_vcpu_load(vcpu, vcpu->cpu);
1827 else
1828 avic_vcpu_put(vcpu);
1829}
1830
d28bc9dd 1831static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
04d2cc77
AK
1832{
1833 struct vcpu_svm *svm = to_svm(vcpu);
66f7b72e
JS
1834 u32 dummy;
1835 u32 eax = 1;
04d2cc77 1836
d28bc9dd
NA
1837 if (!init_event) {
1838 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1839 MSR_IA32_APICBASE_ENABLE;
1840 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1841 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1842 }
5690891b 1843 init_vmcb(svm);
70433389 1844
e911eb3b 1845 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
66f7b72e 1846 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
44a95dae
SS
1847
1848 if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1849 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
04d2cc77
AK
1850}
1851
dfa20099
SS
1852static int avic_init_vcpu(struct vcpu_svm *svm)
1853{
1854 int ret;
1855
67034bb9 1856 if (!kvm_vcpu_apicv_active(&svm->vcpu))
dfa20099
SS
1857 return 0;
1858
1859 ret = avic_init_backing_page(&svm->vcpu);
1860 if (ret)
1861 return ret;
1862
1863 INIT_LIST_HEAD(&svm->ir_list);
1864 spin_lock_init(&svm->ir_list_lock);
1865
1866 return ret;
1867}
1868
fb3f0f51 1869static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
6aa8b732 1870{
a2fa3e9f 1871 struct vcpu_svm *svm;
6aa8b732 1872 struct page *page;
f65c229c 1873 struct page *msrpm_pages;
b286d5d8 1874 struct page *hsave_page;
3d6368ef 1875 struct page *nested_msrpm_pages;
fb3f0f51 1876 int err;
6aa8b732 1877
c16f862d 1878 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
fb3f0f51
RR
1879 if (!svm) {
1880 err = -ENOMEM;
1881 goto out;
1882 }
1883
1884 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1885 if (err)
1886 goto free_svm;
1887
b7af4043 1888 err = -ENOMEM;
6aa8b732 1889 page = alloc_page(GFP_KERNEL);
b7af4043 1890 if (!page)
fb3f0f51 1891 goto uninit;
6aa8b732 1892
f65c229c
JR
1893 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1894 if (!msrpm_pages)
b7af4043 1895 goto free_page1;
3d6368ef
AG
1896
1897 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1898 if (!nested_msrpm_pages)
b7af4043 1899 goto free_page2;
f65c229c 1900
b286d5d8
AG
1901 hsave_page = alloc_page(GFP_KERNEL);
1902 if (!hsave_page)
b7af4043
TY
1903 goto free_page3;
1904
dfa20099
SS
1905 err = avic_init_vcpu(svm);
1906 if (err)
1907 goto free_page4;
44a95dae 1908
8221c137
SS
1909 /* We initialize this flag to true to make sure that the is_running
1910 * bit is set the first time the vcpu is loaded.
1911 */
1912 svm->avic_is_running = true;
1913
e6aa9abd 1914 svm->nested.hsave = page_address(hsave_page);
b286d5d8 1915
b7af4043
TY
1916 svm->msrpm = page_address(msrpm_pages);
1917 svm_vcpu_init_msrpm(svm->msrpm);
1918
e6aa9abd 1919 svm->nested.msrpm = page_address(nested_msrpm_pages);
323c3d80 1920 svm_vcpu_init_msrpm(svm->nested.msrpm);
3d6368ef 1921
a2fa3e9f
GH
1922 svm->vmcb = page_address(page);
1923 clear_page(svm->vmcb);
d0ec49d4 1924 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
a2fa3e9f 1925 svm->asid_generation = 0;
5690891b 1926 init_vmcb(svm);
6aa8b732 1927
2b036c6b
BO
1928 svm_init_osvw(&svm->vcpu);
1929
fb3f0f51 1930 return &svm->vcpu;
36241b8c 1931
44a95dae
SS
1932free_page4:
1933 __free_page(hsave_page);
b7af4043
TY
1934free_page3:
1935 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
1936free_page2:
1937 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
1938free_page1:
1939 __free_page(page);
fb3f0f51
RR
1940uninit:
1941 kvm_vcpu_uninit(&svm->vcpu);
1942free_svm:
a4770347 1943 kmem_cache_free(kvm_vcpu_cache, svm);
fb3f0f51
RR
1944out:
1945 return ERR_PTR(err);
6aa8b732
AK
1946}
1947
1948static void svm_free_vcpu(struct kvm_vcpu *vcpu)
1949{
a2fa3e9f
GH
1950 struct vcpu_svm *svm = to_svm(vcpu);
1951
d0ec49d4 1952 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
f65c229c 1953 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
e6aa9abd
JR
1954 __free_page(virt_to_page(svm->nested.hsave));
1955 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
fb3f0f51 1956 kvm_vcpu_uninit(vcpu);
a4770347 1957 kmem_cache_free(kvm_vcpu_cache, svm);
6aa8b732
AK
1958}
1959
15ad7146 1960static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
6aa8b732 1961{
a2fa3e9f 1962 struct vcpu_svm *svm = to_svm(vcpu);
15ad7146 1963 int i;
0cc5064d 1964
0cc5064d 1965 if (unlikely(cpu != vcpu->cpu)) {
4b656b12 1966 svm->asid_generation = 0;
8d28fec4 1967 mark_all_dirty(svm->vmcb);
0cc5064d 1968 }
94dfbdb3 1969
82ca2d10
AK
1970#ifdef CONFIG_X86_64
1971 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
1972#endif
dacccfdd
AK
1973 savesegment(fs, svm->host.fs);
1974 savesegment(gs, svm->host.gs);
1975 svm->host.ldt = kvm_read_ldt();
1976
94dfbdb3 1977 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
a2fa3e9f 1978 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
fbc0db76 1979
ad721883
HZ
1980 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
1981 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
1982 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
1983 __this_cpu_write(current_tsc_ratio, tsc_ratio);
1984 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
1985 }
fbc0db76 1986 }
46896c73
PB
1987 /* This assumes that the kernel never uses MSR_TSC_AUX */
1988 if (static_cpu_has(X86_FEATURE_RDTSCP))
1989 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
8221c137
SS
1990
1991 avic_vcpu_load(vcpu, cpu);
6aa8b732
AK
1992}
1993
1994static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1995{
a2fa3e9f 1996 struct vcpu_svm *svm = to_svm(vcpu);
94dfbdb3
AL
1997 int i;
1998
8221c137
SS
1999 avic_vcpu_put(vcpu);
2000
e1beb1d3 2001 ++vcpu->stat.host_state_reload;
dacccfdd
AK
2002 kvm_load_ldt(svm->host.ldt);
2003#ifdef CONFIG_X86_64
2004 loadsegment(fs, svm->host.fs);
296f781a 2005 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
893a5ab6 2006 load_gs_index(svm->host.gs);
dacccfdd 2007#else
831ca609 2008#ifdef CONFIG_X86_32_LAZY_GS
dacccfdd 2009 loadsegment(gs, svm->host.gs);
831ca609 2010#endif
dacccfdd 2011#endif
94dfbdb3 2012 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
a2fa3e9f 2013 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
6aa8b732
AK
2014}
2015
8221c137
SS
2016static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2017{
2018 avic_set_running(vcpu, false);
2019}
2020
2021static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2022{
2023 avic_set_running(vcpu, true);
2024}
2025
6aa8b732
AK
2026static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2027{
9b611747
LP
2028 struct vcpu_svm *svm = to_svm(vcpu);
2029 unsigned long rflags = svm->vmcb->save.rflags;
2030
2031 if (svm->nmi_singlestep) {
2032 /* Hide our flags if they were not set by the guest */
2033 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2034 rflags &= ~X86_EFLAGS_TF;
2035 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2036 rflags &= ~X86_EFLAGS_RF;
2037 }
2038 return rflags;
6aa8b732
AK
2039}
2040
2041static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2042{
9b611747
LP
2043 if (to_svm(vcpu)->nmi_singlestep)
2044 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2045
ae9fedc7 2046 /*
bb3541f1 2047 * Any change of EFLAGS.VM is accompanied by a reload of SS
ae9fedc7
PB
2048 * (caused by either a task switch or an inter-privilege IRET),
2049 * so we do not need to update the CPL here.
2050 */
a2fa3e9f 2051 to_svm(vcpu)->vmcb->save.rflags = rflags;
6aa8b732
AK
2052}
2053
6de4f3ad
AK
2054static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2055{
2056 switch (reg) {
2057 case VCPU_EXREG_PDPTR:
2058 BUG_ON(!npt_enabled);
9f8fe504 2059 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
6de4f3ad
AK
2060 break;
2061 default:
2062 BUG();
2063 }
2064}
2065
f0b85051
AG
2066static void svm_set_vintr(struct vcpu_svm *svm)
2067{
8a05a1b8 2068 set_intercept(svm, INTERCEPT_VINTR);
f0b85051
AG
2069}
2070
2071static void svm_clear_vintr(struct vcpu_svm *svm)
2072{
8a05a1b8 2073 clr_intercept(svm, INTERCEPT_VINTR);
f0b85051
AG
2074}
2075
6aa8b732
AK
2076static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2077{
a2fa3e9f 2078 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
6aa8b732
AK
2079
2080 switch (seg) {
2081 case VCPU_SREG_CS: return &save->cs;
2082 case VCPU_SREG_DS: return &save->ds;
2083 case VCPU_SREG_ES: return &save->es;
2084 case VCPU_SREG_FS: return &save->fs;
2085 case VCPU_SREG_GS: return &save->gs;
2086 case VCPU_SREG_SS: return &save->ss;
2087 case VCPU_SREG_TR: return &save->tr;
2088 case VCPU_SREG_LDTR: return &save->ldtr;
2089 }
2090 BUG();
8b6d44c7 2091 return NULL;
6aa8b732
AK
2092}
2093
2094static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2095{
2096 struct vmcb_seg *s = svm_seg(vcpu, seg);
2097
2098 return s->base;
2099}
2100
2101static void svm_get_segment(struct kvm_vcpu *vcpu,
2102 struct kvm_segment *var, int seg)
2103{
2104 struct vmcb_seg *s = svm_seg(vcpu, seg);
2105
2106 var->base = s->base;
2107 var->limit = s->limit;
2108 var->selector = s->selector;
2109 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2110 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2111 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2112 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2113 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2114 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2115 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
80112c89
JM
2116
2117 /*
2118 * AMD CPUs circa 2014 track the G bit for all segments except CS.
2119 * However, the SVM spec states that the G bit is not observed by the
2120 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2121 * So let's synthesize a legal G bit for all segments; this helps
2122 * running KVM nested. It also helps cross-vendor migration, because
2123 * Intel's vmentry has a check on the 'G' bit.
2124 */
2125 var->g = s->limit > 0xfffff;
25022acc 2126
e0231715
JR
2127 /*
2128 * AMD's VMCB does not have an explicit unusable field, so emulate it
19bca6ab
AP
2129 * for cross-vendor migration purposes by "not present".
2130 */
8eae9570 2131 var->unusable = !var->present;
19bca6ab 2132
1fbdc7a5 2133 switch (seg) {
1fbdc7a5
AP
2134 case VCPU_SREG_TR:
2135 /*
2136 * Work around a bug where the busy flag in the tr selector
2137 * isn't exposed
2138 */
c0d09828 2139 var->type |= 0x2;
1fbdc7a5
AP
2140 break;
2141 case VCPU_SREG_DS:
2142 case VCPU_SREG_ES:
2143 case VCPU_SREG_FS:
2144 case VCPU_SREG_GS:
2145 /*
2146 * The accessed bit must always be set in the segment
2147 * descriptor cache; although it can be cleared in the
2148 * descriptor, the cached bit always remains at 1. Since
2149 * Intel has a check on this, set it here to support
2150 * cross-vendor migration.
2151 */
2152 if (!var->unusable)
2153 var->type |= 0x1;
2154 break;
b586eb02 2155 case VCPU_SREG_SS:
e0231715
JR
2156 /*
2157 * On AMD CPUs sometimes the DB bit in the segment
b586eb02
AP
2158 * descriptor is left as 1, although the whole segment has
2159 * been made unusable. Clear it here to pass an Intel VMX
2160 * entry check when cross vendor migrating.
2161 */
2162 if (var->unusable)
2163 var->db = 0;
d9c1b543 2164 /* This is symmetric with svm_set_segment() */
33b458d2 2165 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
b586eb02 2166 break;
1fbdc7a5 2167 }
6aa8b732
AK
2168}
2169
2e4d2653
IE
2170static int svm_get_cpl(struct kvm_vcpu *vcpu)
2171{
2172 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2173
2174 return save->cpl;
2175}
2176
89a27f4d 2177static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 2178{
a2fa3e9f
GH
2179 struct vcpu_svm *svm = to_svm(vcpu);
2180
89a27f4d
GN
2181 dt->size = svm->vmcb->save.idtr.limit;
2182 dt->address = svm->vmcb->save.idtr.base;
6aa8b732
AK
2183}
2184
89a27f4d 2185static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 2186{
a2fa3e9f
GH
2187 struct vcpu_svm *svm = to_svm(vcpu);
2188
89a27f4d
GN
2189 svm->vmcb->save.idtr.limit = dt->size;
2190 svm->vmcb->save.idtr.base = dt->address ;
17a703cb 2191 mark_dirty(svm->vmcb, VMCB_DT);
6aa8b732
AK
2192}
2193
89a27f4d 2194static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 2195{
a2fa3e9f
GH
2196 struct vcpu_svm *svm = to_svm(vcpu);
2197
89a27f4d
GN
2198 dt->size = svm->vmcb->save.gdtr.limit;
2199 dt->address = svm->vmcb->save.gdtr.base;
6aa8b732
AK
2200}
2201
89a27f4d 2202static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
6aa8b732 2203{
a2fa3e9f
GH
2204 struct vcpu_svm *svm = to_svm(vcpu);
2205
89a27f4d
GN
2206 svm->vmcb->save.gdtr.limit = dt->size;
2207 svm->vmcb->save.gdtr.base = dt->address ;
17a703cb 2208 mark_dirty(svm->vmcb, VMCB_DT);
6aa8b732
AK
2209}
2210
e8467fda
AK
2211static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2212{
2213}
2214
aff48baa
AK
2215static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2216{
2217}
2218
25c4c276 2219static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
399badf3
AK
2220{
2221}
2222
d225157b
AK
2223static void update_cr0_intercept(struct vcpu_svm *svm)
2224{
2225 ulong gcr0 = svm->vcpu.arch.cr0;
2226 u64 *hcr0 = &svm->vmcb->save.cr0;
2227
bd7e5b08
PB
2228 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2229 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
d225157b 2230
dcca1a65 2231 mark_dirty(svm->vmcb, VMCB_CR);
d225157b 2232
bd7e5b08 2233 if (gcr0 == *hcr0) {
4ee546b4
RJ
2234 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2235 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
d225157b 2236 } else {
4ee546b4
RJ
2237 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2238 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
d225157b
AK
2239 }
2240}
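/*
 * Illustrative sketch (not part of svm.c) of the decision made above:
 * the hardware CR0 keeps its non-selective bits and takes the guest's
 * selective bits, and the CR0 read/write intercepts are only needed
 * while the two values still differ.  The names and the selective-mask
 * parameter are invented for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_need_cr0_intercept(uint64_t gcr0, uint64_t hcr0,
				      uint64_t selective_mask)
{
	hcr0 = (hcr0 & ~selective_mask) | (gcr0 & selective_mask);

	return gcr0 != hcr0;	/* intercept only while guest and host CR0 differ */
}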
2241
6aa8b732
AK
2242static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2243{
a2fa3e9f
GH
2244 struct vcpu_svm *svm = to_svm(vcpu);
2245
05b3e0c2 2246#ifdef CONFIG_X86_64
f6801dff 2247 if (vcpu->arch.efer & EFER_LME) {
707d92fa 2248 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
f6801dff 2249 vcpu->arch.efer |= EFER_LMA;
2b5203ee 2250 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
6aa8b732
AK
2251 }
2252
d77c26fc 2253 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
f6801dff 2254 vcpu->arch.efer &= ~EFER_LMA;
2b5203ee 2255 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
6aa8b732
AK
2256 }
2257 }
2258#endif
ad312c7c 2259 vcpu->arch.cr0 = cr0;
888f9f3e
AK
2260
2261 if (!npt_enabled)
2262 cr0 |= X86_CR0_PG | X86_CR0_WP;
02daab21 2263
bcf166a9
PB
2264 /*
2265 * re-enable caching here because the QEMU bios
2266 * does not do it - this results in some delay at
2267 * reboot
2268 */
2269 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2270 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
a2fa3e9f 2271 svm->vmcb->save.cr0 = cr0;
dcca1a65 2272 mark_dirty(svm->vmcb, VMCB_CR);
d225157b 2273 update_cr0_intercept(svm);
6aa8b732
AK
2274}
2275
5e1746d6 2276static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
6aa8b732 2277{
1e02ce4c 2278 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
e5eab0ce
JR
2279 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2280
5e1746d6
NHE
2281 if (cr4 & X86_CR4_VMXE)
2282 return 1;
2283
e5eab0ce 2284 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
f40f6a45 2285 svm_flush_tlb(vcpu);
6394b649 2286
ec077263
JR
2287 vcpu->arch.cr4 = cr4;
2288 if (!npt_enabled)
2289 cr4 |= X86_CR4_PAE;
6394b649 2290 cr4 |= host_cr4_mce;
ec077263 2291 to_svm(vcpu)->vmcb->save.cr4 = cr4;
dcca1a65 2292 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
5e1746d6 2293 return 0;
6aa8b732
AK
2294}
2295
2296static void svm_set_segment(struct kvm_vcpu *vcpu,
2297 struct kvm_segment *var, int seg)
2298{
a2fa3e9f 2299 struct vcpu_svm *svm = to_svm(vcpu);
6aa8b732
AK
2300 struct vmcb_seg *s = svm_seg(vcpu, seg);
2301
2302 s->base = var->base;
2303 s->limit = var->limit;
2304 s->selector = var->selector;
d9c1b543
RP
2305 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2306 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2307 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2308 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2309 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2310 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2311 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2312 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
ae9fedc7
PB
2313
2314 /*
2315 * This is always accurate, except if SYSRET returned to a segment
2316 * with SS.DPL != 3. Intel does not have this quirk, and always
2317 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2318 * would entail passing the CPL to userspace and back.
2319 */
2320 if (seg == VCPU_SREG_SS)
d9c1b543
RP
2321 /* This is symmetric with svm_get_segment() */
2322 svm->vmcb->save.cpl = (var->dpl & 3);
6aa8b732 2323
060d0c9a 2324 mark_dirty(svm->vmcb, VMCB_SEG);
6aa8b732
AK
2325}
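/*
 * Illustrative sketch (not part of svm.c): the attrib packing used
 * above, applied to a flat 64-bit code segment.  The bit positions are
 * assumptions standing in for the SVM_SELECTOR_*_SHIFT definitions the
 * function relies on.
 */
#include <stdio.h>

int main(void)
{
	/* assumed layout: type[3:0] S[4] DPL[6:5] P[7] AVL[8] L[9] DB[10] G[11] */
	unsigned int attrib = 0;

	attrib |= 0xb;		/* type: execute/read, accessed */
	attrib |= 1 << 4;	/* S: code/data segment */
	attrib |= 0 << 5;	/* DPL 0 */
	attrib |= 1 << 7;	/* present */
	attrib |= 1 << 9;	/* L: 64-bit code */
	attrib |= 1 << 11;	/* G: 4K granularity */

	printf("attrib = 0x%x\n", attrib);	/* prints 0xa9b */
	return 0;
}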
2326
cbdb967a 2327static void update_bp_intercept(struct kvm_vcpu *vcpu)
6aa8b732 2328{
d0bfb940
JK
2329 struct vcpu_svm *svm = to_svm(vcpu);
2330
18c918c5 2331 clr_exception_intercept(svm, BP_VECTOR);
44c11430 2332
d0bfb940 2333 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
d0bfb940 2334 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
18c918c5 2335 set_exception_intercept(svm, BP_VECTOR);
d0bfb940
JK
2336 } else
2337 vcpu->guest_debug = 0;
44c11430
GN
2338}
2339
0fe1e009 2340static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
6aa8b732 2341{
0fe1e009
TH
2342 if (sd->next_asid > sd->max_asid) {
2343 ++sd->asid_generation;
4faefff3 2344 sd->next_asid = sd->min_asid;
a2fa3e9f 2345 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
6aa8b732
AK
2346 }
2347
0fe1e009
TH
2348 svm->asid_generation = sd->asid_generation;
2349 svm->vmcb->control.asid = sd->next_asid++;
d48086d1
JR
2350
2351 mark_dirty(svm->vmcb, VMCB_ASID);
6aa8b732
AK
2352}
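/*
 * Illustrative sketch (not part of svm.c) of the ASID allocation policy
 * above: when the per-CPU pool runs out, the generation counter is
 * bumped and a flush-all is requested, so any vcpu still carrying the
 * old generation will be given a fresh ASID.  Types and names are
 * invented for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

struct sketch_cpu_data {
	uint32_t min_asid, max_asid, next_asid, asid_generation;
};

static uint32_t sketch_new_asid(struct sketch_cpu_data *sd, bool *flush_all)
{
	*flush_all = false;

	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;		/* invalidates every cached ASID */
		sd->next_asid = sd->min_asid;
		*flush_all = true;		/* stands in for TLB_CONTROL_FLUSH_ALL_ASID */
	}

	return sd->next_asid++;
}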
2353
73aaf249
JK
2354static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2355{
2356 return to_svm(vcpu)->vmcb->save.dr6;
2357}
2358
2359static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2360{
2361 struct vcpu_svm *svm = to_svm(vcpu);
2362
2363 svm->vmcb->save.dr6 = value;
2364 mark_dirty(svm->vmcb, VMCB_DR);
2365}
2366
facb0139
PB
2367static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2368{
2369 struct vcpu_svm *svm = to_svm(vcpu);
2370
2371 get_debugreg(vcpu->arch.db[0], 0);
2372 get_debugreg(vcpu->arch.db[1], 1);
2373 get_debugreg(vcpu->arch.db[2], 2);
2374 get_debugreg(vcpu->arch.db[3], 3);
2375 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2376 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2377
2378 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2379 set_dr_intercepts(svm);
2380}
2381
020df079 2382static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
6aa8b732 2383{
42dbaa5a 2384 struct vcpu_svm *svm = to_svm(vcpu);
42dbaa5a 2385
020df079 2386 svm->vmcb->save.dr7 = value;
72214b96 2387 mark_dirty(svm->vmcb, VMCB_DR);
6aa8b732
AK
2388}
2389
851ba692 2390static int pf_interception(struct vcpu_svm *svm)
6aa8b732 2391{
631bc487 2392 u64 fault_address = svm->vmcb->control.exit_info_2;
1261bfa3 2393 u64 error_code = svm->vmcb->control.exit_info_1;
6aa8b732 2394
1261bfa3 2395 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
dc25e89e 2396 svm->vmcb->control.insn_bytes,
d0006530
PB
2397 svm->vmcb->control.insn_len);
2398}
2399
2400static int npf_interception(struct vcpu_svm *svm)
2401{
2402 u64 fault_address = svm->vmcb->control.exit_info_2;
2403 u64 error_code = svm->vmcb->control.exit_info_1;
2404
2405 trace_kvm_page_fault(fault_address, error_code);
2406 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2407 svm->vmcb->control.insn_bytes,
2408 svm->vmcb->control.insn_len);
6aa8b732
AK
2409}
2410
851ba692 2411static int db_interception(struct vcpu_svm *svm)
d0bfb940 2412{
851ba692
AK
2413 struct kvm_run *kvm_run = svm->vcpu.run;
2414
d0bfb940 2415 if (!(svm->vcpu.guest_debug &
44c11430 2416 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
6be7d306 2417 !svm->nmi_singlestep) {
d0bfb940
JK
2418 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2419 return 1;
2420 }
44c11430 2421
6be7d306 2422 if (svm->nmi_singlestep) {
4aebd0e9 2423 disable_nmi_singlestep(svm);
44c11430
GN
2424 }
2425
2426 if (svm->vcpu.guest_debug &
e0231715 2427 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
44c11430
GN
2428 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2429 kvm_run->debug.arch.pc =
2430 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2431 kvm_run->debug.arch.exception = DB_VECTOR;
2432 return 0;
2433 }
2434
2435 return 1;
d0bfb940
JK
2436}
2437
851ba692 2438static int bp_interception(struct vcpu_svm *svm)
d0bfb940 2439{
851ba692
AK
2440 struct kvm_run *kvm_run = svm->vcpu.run;
2441
d0bfb940
JK
2442 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2443 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2444 kvm_run->debug.arch.exception = BP_VECTOR;
2445 return 0;
2446}
2447
851ba692 2448static int ud_interception(struct vcpu_svm *svm)
7aa81cc0
AL
2449{
2450 int er;
2451
51d8b661 2452 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
7aa81cc0 2453 if (er != EMULATE_DONE)
7ee5d940 2454 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
7aa81cc0
AL
2455 return 1;
2456}
2457
54a20552
EN
2458static int ac_interception(struct vcpu_svm *svm)
2459{
2460 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2461 return 1;
2462}
2463
67ec6607
JR
2464static bool is_erratum_383(void)
2465{
2466 int err, i;
2467 u64 value;
2468
2469 if (!erratum_383_found)
2470 return false;
2471
2472 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2473 if (err)
2474 return false;
2475
2476 /* Bit 62 may or may not be set for this mce */
2477 value &= ~(1ULL << 62);
2478
2479 if (value != 0xb600000000010015ULL)
2480 return false;
2481
2482 /* Clear MCi_STATUS registers */
2483 for (i = 0; i < 6; ++i)
2484 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2485
2486 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2487 if (!err) {
2488 u32 low, high;
2489
2490 value &= ~(1ULL << 2);
2491 low = lower_32_bits(value);
2492 high = upper_32_bits(value);
2493
2494 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2495 }
2496
2497 /* Flush tlb to evict multi-match entries */
2498 __flush_tlb_all();
2499
2500 return true;
2501}
2502
fe5913e4 2503static void svm_handle_mce(struct vcpu_svm *svm)
53371b50 2504{
67ec6607
JR
2505 if (is_erratum_383()) {
2506 /*
2507 * Erratum 383 triggered. Guest state is corrupt so kill the
2508 * guest.
2509 */
2510 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2511
a8eeb04a 2512 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
67ec6607
JR
2513
2514 return;
2515 }
2516
53371b50
JR
2517 /*
2518 * On an #MC intercept the MCE handler is not called automatically in
2519 * the host. So do it by hand here.
2520 */
2521 asm volatile (
2522 "int $0x12\n");
2523 /* not sure if we ever come back to this point */
2524
fe5913e4
JR
2525 return;
2526}
2527
2528static int mc_interception(struct vcpu_svm *svm)
2529{
53371b50
JR
2530 return 1;
2531}
2532
851ba692 2533static int shutdown_interception(struct vcpu_svm *svm)
46fe4ddd 2534{
851ba692
AK
2535 struct kvm_run *kvm_run = svm->vcpu.run;
2536
46fe4ddd
JR
2537 /*
2538 * VMCB is undefined after a SHUTDOWN intercept
2539 * so reinitialize it.
2540 */
a2fa3e9f 2541 clear_page(svm->vmcb);
5690891b 2542 init_vmcb(svm);
46fe4ddd
JR
2543
2544 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2545 return 0;
2546}
2547
851ba692 2548static int io_interception(struct vcpu_svm *svm)
6aa8b732 2549{
cf8f70bf 2550 struct kvm_vcpu *vcpu = &svm->vcpu;
d77c26fc 2551 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
b742c1e6 2552 int size, in, string, ret;
039576c0 2553 unsigned port;
6aa8b732 2554
e756fc62 2555 ++svm->vcpu.stat.io_exits;
e70669ab 2556 string = (io_info & SVM_IOIO_STR_MASK) != 0;
039576c0 2557 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
8370c3d0 2558 if (string)
51d8b661 2559 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
cf8f70bf 2560
039576c0
AK
2561 port = io_info >> 16;
2562 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
cf8f70bf 2563 svm->next_rip = svm->vmcb->control.exit_info_2;
b742c1e6 2564 ret = kvm_skip_emulated_instruction(&svm->vcpu);
cf8f70bf 2565
b742c1e6
LP
2566 /*
2567 * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
2568 * KVM_EXIT_DEBUG here.
2569 */
2570 if (in)
2571 return kvm_fast_pio_in(vcpu, size, port) && ret;
2572 else
2573 return kvm_fast_pio_out(vcpu, size, port) && ret;
6aa8b732
AK
2574}
2575
851ba692 2576static int nmi_interception(struct vcpu_svm *svm)
c47f098d
JR
2577{
2578 return 1;
2579}
2580
851ba692 2581static int intr_interception(struct vcpu_svm *svm)
a0698055
JR
2582{
2583 ++svm->vcpu.stat.irq_exits;
2584 return 1;
2585}
2586
851ba692 2587static int nop_on_interception(struct vcpu_svm *svm)
6aa8b732
AK
2588{
2589 return 1;
2590}
2591
851ba692 2592static int halt_interception(struct vcpu_svm *svm)
6aa8b732 2593{
5fdbf976 2594 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
e756fc62 2595 return kvm_emulate_halt(&svm->vcpu);
6aa8b732
AK
2596}
2597
851ba692 2598static int vmmcall_interception(struct vcpu_svm *svm)
02e235bc 2599{
5fdbf976 2600 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
0d9c055e 2601 return kvm_emulate_hypercall(&svm->vcpu);
02e235bc
AK
2602}
2603
5bd2edc3
JR
2604static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2605{
2606 struct vcpu_svm *svm = to_svm(vcpu);
2607
2608 return svm->nested.nested_cr3;
2609}
2610
e4e517b4
AK
2611static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2612{
2613 struct vcpu_svm *svm = to_svm(vcpu);
2614 u64 cr3 = svm->nested.nested_cr3;
2615 u64 pdpte;
2616 int ret;
2617
d0ec49d4 2618 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
54bf36aa 2619 offset_in_page(cr3) + index * 8, 8);
e4e517b4
AK
2620 if (ret)
2621 return 0;
2622 return pdpte;
2623}
2624
5bd2edc3
JR
2625static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2626 unsigned long root)
2627{
2628 struct vcpu_svm *svm = to_svm(vcpu);
2629
d0ec49d4 2630 svm->vmcb->control.nested_cr3 = __sme_set(root);
b2747166 2631 mark_dirty(svm->vmcb, VMCB_NPT);
f40f6a45 2632 svm_flush_tlb(vcpu);
5bd2edc3
JR
2633}
2634
6389ee94
AK
2635static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2636 struct x86_exception *fault)
5bd2edc3
JR
2637{
2638 struct vcpu_svm *svm = to_svm(vcpu);
2639
5e352519
PB
2640 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2641 /*
2642 * TODO: track the cause of the nested page fault, and
2643 * correctly fill in the high bits of exit_info_1.
2644 */
2645 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2646 svm->vmcb->control.exit_code_hi = 0;
2647 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2648 svm->vmcb->control.exit_info_2 = fault->address;
2649 }
2650
2651 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2652 svm->vmcb->control.exit_info_1 |= fault->error_code;
2653
2654 /*
2655 * The present bit is always zero for page structure faults on real
2656 * hardware.
2657 */
2658 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2659 svm->vmcb->control.exit_info_1 &= ~1;
5bd2edc3
JR
2660
2661 nested_svm_vmexit(svm);
2662}
2663
8a3c1a33 2664static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
4b16184c 2665{
ad896af0
PB
2666 WARN_ON(mmu_is_nested(vcpu));
2667 kvm_init_shadow_mmu(vcpu);
4b16184c
JR
2668 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2669 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
e4e517b4 2670 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
4b16184c 2671 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
855feb67 2672 vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
c258b62b 2673 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
4b16184c 2674 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
4b16184c
JR
2675}
2676
2677static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2678{
2679 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2680}
2681
c0725420
AG
2682static int nested_svm_check_permissions(struct vcpu_svm *svm)
2683{
e9196ceb
DC
2684 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2685 !is_paging(&svm->vcpu)) {
c0725420
AG
2686 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2687 return 1;
2688 }
2689
2690 if (svm->vmcb->save.cpl) {
2691 kvm_inject_gp(&svm->vcpu, 0);
2692 return 1;
2693 }
2694
e9196ceb 2695 return 0;
c0725420
AG
2696}
2697
cf74a78b
AG
2698static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2699 bool has_error_code, u32 error_code)
2700{
b8e88bc8
JR
2701 int vmexit;
2702
2030753d 2703 if (!is_guest_mode(&svm->vcpu))
0295ad7d 2704 return 0;
cf74a78b 2705
adfe20fb
WL
2706 vmexit = nested_svm_intercept(svm);
2707 if (vmexit != NESTED_EXIT_DONE)
2708 return 0;
2709
0295ad7d
JR
2710 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2711 svm->vmcb->control.exit_code_hi = 0;
2712 svm->vmcb->control.exit_info_1 = error_code;
b96fb439
PB
2713
2714 /*
2715 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2716 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2717 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
2719 * written only when inject_pending_event runs (DR6 would be written here
2719 * too). This should be conditional on a new capability---if the
2720 * capability is disabled, kvm_multiple_exception would write the
2721 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2722 */
adfe20fb
WL
2723 if (svm->vcpu.arch.exception.nested_apf)
2724 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2725 else
2726 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
b8e88bc8 2727
adfe20fb 2728 svm->nested.exit_required = true;
b8e88bc8 2729 return vmexit;
cf74a78b
AG
2730}
2731
8fe54654
JR
2732/* This function returns true if it is safe to enable the irq window */
2733static inline bool nested_svm_intr(struct vcpu_svm *svm)
cf74a78b 2734{
2030753d 2735 if (!is_guest_mode(&svm->vcpu))
8fe54654 2736 return true;
cf74a78b 2737
26666957 2738 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
8fe54654 2739 return true;
cf74a78b 2740
26666957 2741 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
8fe54654 2742 return false;
cf74a78b 2743
a0a07cd2
GN
2744 /*
2745 * if vmexit was already requested (by intercepted exception
2746 * for instance) do not overwrite it with "external interrupt"
2747 * vmexit.
2748 */
2749 if (svm->nested.exit_required)
2750 return false;
2751
197717d5
JR
2752 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2753 svm->vmcb->control.exit_info_1 = 0;
2754 svm->vmcb->control.exit_info_2 = 0;
26666957 2755
cd3ff653
JR
2756 if (svm->nested.intercept & 1ULL) {
2757 /*
2758 * The #vmexit can't be emulated here directly because this
c5ec2e56 2759 * code path runs with irqs and preemption disabled. A
cd3ff653
JR
2760 * #vmexit emulation might sleep. Only signal request for
2761 * the #vmexit here.
2762 */
2763 svm->nested.exit_required = true;
236649de 2764 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
8fe54654 2765 return false;
cf74a78b
AG
2766 }
2767
8fe54654 2768 return true;
cf74a78b
AG
2769}
2770
887f500c
JR
2771/* This function returns true if it is safe to enable the nmi window */
2772static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2773{
2030753d 2774 if (!is_guest_mode(&svm->vcpu))
887f500c
JR
2775 return true;
2776
2777 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2778 return true;
2779
2780 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2781 svm->nested.exit_required = true;
2782
2783 return false;
cf74a78b
AG
2784}
2785
7597f129 2786static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
34f80cfa
JR
2787{
2788 struct page *page;
2789
6c3bd3d7
JR
2790 might_sleep();
2791
54bf36aa 2792 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
34f80cfa
JR
2793 if (is_error_page(page))
2794 goto error;
2795
7597f129
JR
2796 *_page = page;
2797
2798 return kmap(page);
34f80cfa
JR
2799
2800error:
34f80cfa
JR
2801 kvm_inject_gp(&svm->vcpu, 0);
2802
2803 return NULL;
2804}
2805
7597f129 2806static void nested_svm_unmap(struct page *page)
34f80cfa 2807{
7597f129 2808 kunmap(page);
34f80cfa
JR
2809 kvm_release_page_dirty(page);
2810}
34f80cfa 2811
ce2ac085
JR
2812static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
2813{
9bf41833
JK
2814 unsigned port, size, iopm_len;
2815 u16 val, mask;
2816 u8 start_bit;
ce2ac085 2817 u64 gpa;
34f80cfa 2818
ce2ac085
JR
2819 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2820 return NESTED_EXIT_HOST;
34f80cfa 2821
ce2ac085 2822 port = svm->vmcb->control.exit_info_1 >> 16;
9bf41833
JK
2823 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2824 SVM_IOIO_SIZE_SHIFT;
ce2ac085 2825 gpa = svm->nested.vmcb_iopm + (port / 8);
9bf41833
JK
2826 start_bit = port % 8;
2827 iopm_len = (start_bit + size > 8) ? 2 : 1;
2828 mask = (0xf >> (4 - size)) << start_bit;
2829 val = 0;
ce2ac085 2830
54bf36aa 2831 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
9bf41833 2832 return NESTED_EXIT_DONE;
ce2ac085 2833
9bf41833 2834 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
34f80cfa
JR
2835}
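/*
 * Illustrative sketch (not part of svm.c): the IOPM lookup arithmetic
 * used above, worked for a hypothetical 2-byte access to port 0x3f9.
 */
#include <stdio.h>

int main(void)
{
	unsigned int port = 0x3f9, size = 2;
	unsigned int start_bit = port % 8;			/* 1 */
	unsigned int iopm_len = (start_bit + size > 8) ? 2 : 1;	/* 1: the bits fit in one byte */
	unsigned int mask = (0xf >> (4 - size)) << start_bit;	/* 0x3 << 1 == 0x6 */
	unsigned int byte_offset = port / 8;			/* 0x7f into the nested IOPM */

	printf("offset=%#x len=%u mask=%#x\n", byte_offset, iopm_len, mask);
	return 0;
}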
2836
d2477826 2837static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
4c2161ae 2838{
0d6b3537
JR
2839 u32 offset, msr, value;
2840 int write, mask;
4c2161ae 2841
3d62d9aa 2842 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
d2477826 2843 return NESTED_EXIT_HOST;
3d62d9aa 2844
0d6b3537
JR
2845 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2846 offset = svm_msrpm_offset(msr);
2847 write = svm->vmcb->control.exit_info_1 & 1;
2848 mask = 1 << ((2 * (msr & 0xf)) + write);
3d62d9aa 2849
0d6b3537
JR
2850 if (offset == MSR_INVALID)
2851 return NESTED_EXIT_DONE;
4c2161ae 2852
0d6b3537
JR
2853 /* Offset is in 32 bit units but need in 8 bit units */
2854 offset *= 4;
4c2161ae 2855
54bf36aa 2856 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
0d6b3537 2857 return NESTED_EXIT_DONE;
3d62d9aa 2858
0d6b3537 2859 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
4c2161ae
JR
2860}
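/*
 * Illustrative sketch (not part of svm.c): the per-MSR bit selection
 * used above.  Each MSR owns two adjacent bits in its permission word
 * (read, then write); the word's offset comes from svm_msrpm_offset(),
 * which is defined elsewhere in this file and is not reproduced here.
 */
#include <stdint.h>

static int sketch_msrpm_wants_exit(uint32_t perm_word, uint32_t msr, int write)
{
	int bit = (2 * (msr & 0xf)) + !!write;	/* two bits per MSR: read, then write */

	return (perm_word >> bit) & 1;		/* 1 means the nested guest asked for the exit */
}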
2861
ab2f4d73
LP
2862/* DB exceptions for our internal use must not cause vmexit */
2863static int nested_svm_intercept_db(struct vcpu_svm *svm)
2864{
2865 unsigned long dr6;
2866
2867 /* if we're not singlestepping, it's not ours */
2868 if (!svm->nmi_singlestep)
2869 return NESTED_EXIT_DONE;
2870
2871 /* if it's not a singlestep exception, it's not ours */
2872 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2873 return NESTED_EXIT_DONE;
2874 if (!(dr6 & DR6_BS))
2875 return NESTED_EXIT_DONE;
2876
2877 /* if the guest is singlestepping, it should get the vmexit */
2878 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2879 disable_nmi_singlestep(svm);
2880 return NESTED_EXIT_DONE;
2881 }
2882
2883 /* it's ours, the nested hypervisor must not see this one */
2884 return NESTED_EXIT_HOST;
2885}
2886
410e4d57 2887static int nested_svm_exit_special(struct vcpu_svm *svm)
cf74a78b 2888{
cf74a78b 2889 u32 exit_code = svm->vmcb->control.exit_code;
4c2161ae 2890
410e4d57
JR
2891 switch (exit_code) {
2892 case SVM_EXIT_INTR:
2893 case SVM_EXIT_NMI:
ff47a49b 2894 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
410e4d57 2895 return NESTED_EXIT_HOST;
410e4d57 2896 case SVM_EXIT_NPF:
e0231715 2897 /* For now we are always handling NPFs when using them */
410e4d57
JR
2898 if (npt_enabled)
2899 return NESTED_EXIT_HOST;
2900 break;
410e4d57 2901 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
631bc487 2902 /* When we're shadowing, trap PFs, but not async PF */
1261bfa3 2903 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
410e4d57
JR
2904 return NESTED_EXIT_HOST;
2905 break;
2906 default:
2907 break;
cf74a78b
AG
2908 }
2909
410e4d57
JR
2910 return NESTED_EXIT_CONTINUE;
2911}
2912
2913/*
2914 * If this function returns true, this #vmexit was already handled
2915 */
b8e88bc8 2916static int nested_svm_intercept(struct vcpu_svm *svm)
410e4d57
JR
2917{
2918 u32 exit_code = svm->vmcb->control.exit_code;
2919 int vmexit = NESTED_EXIT_HOST;
2920
cf74a78b 2921 switch (exit_code) {
9c4e40b9 2922 case SVM_EXIT_MSR:
3d62d9aa 2923 vmexit = nested_svm_exit_handled_msr(svm);
9c4e40b9 2924 break;
ce2ac085
JR
2925 case SVM_EXIT_IOIO:
2926 vmexit = nested_svm_intercept_ioio(svm);
2927 break;
4ee546b4
RJ
2928 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
2929 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
2930 if (svm->nested.intercept_cr & bit)
410e4d57 2931 vmexit = NESTED_EXIT_DONE;
cf74a78b
AG
2932 break;
2933 }
3aed041a
JR
2934 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
2935 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
2936 if (svm->nested.intercept_dr & bit)
410e4d57 2937 vmexit = NESTED_EXIT_DONE;
cf74a78b
AG
2938 break;
2939 }
2940 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
2941 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
ab2f4d73
LP
2942 if (svm->nested.intercept_exceptions & excp_bits) {
2943 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
2944 vmexit = nested_svm_intercept_db(svm);
2945 else
2946 vmexit = NESTED_EXIT_DONE;
2947 }
631bc487
GN
2948 /* async page fault always causes a vmexit */
2949 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
adfe20fb 2950 svm->vcpu.arch.exception.nested_apf != 0)
631bc487 2951 vmexit = NESTED_EXIT_DONE;
cf74a78b
AG
2952 break;
2953 }
228070b1
JR
2954 case SVM_EXIT_ERR: {
2955 vmexit = NESTED_EXIT_DONE;
2956 break;
2957 }
cf74a78b
AG
2958 default: {
2959 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
aad42c64 2960 if (svm->nested.intercept & exit_bits)
410e4d57 2961 vmexit = NESTED_EXIT_DONE;
cf74a78b
AG
2962 }
2963 }
2964
b8e88bc8
JR
2965 return vmexit;
2966}
2967
2968static int nested_svm_exit_handled(struct vcpu_svm *svm)
2969{
2970 int vmexit;
2971
2972 vmexit = nested_svm_intercept(svm);
2973
2974 if (vmexit == NESTED_EXIT_DONE)
9c4e40b9 2975 nested_svm_vmexit(svm);
9c4e40b9
JR
2976
2977 return vmexit;
cf74a78b
AG
2978}
2979
0460a979
JR
2980static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
2981{
2982 struct vmcb_control_area *dst = &dst_vmcb->control;
2983 struct vmcb_control_area *from = &from_vmcb->control;
2984
4ee546b4 2985 dst->intercept_cr = from->intercept_cr;
3aed041a 2986 dst->intercept_dr = from->intercept_dr;
0460a979
JR
2987 dst->intercept_exceptions = from->intercept_exceptions;
2988 dst->intercept = from->intercept;
2989 dst->iopm_base_pa = from->iopm_base_pa;
2990 dst->msrpm_base_pa = from->msrpm_base_pa;
2991 dst->tsc_offset = from->tsc_offset;
2992 dst->asid = from->asid;
2993 dst->tlb_ctl = from->tlb_ctl;
2994 dst->int_ctl = from->int_ctl;
2995 dst->int_vector = from->int_vector;
2996 dst->int_state = from->int_state;
2997 dst->exit_code = from->exit_code;
2998 dst->exit_code_hi = from->exit_code_hi;
2999 dst->exit_info_1 = from->exit_info_1;
3000 dst->exit_info_2 = from->exit_info_2;
3001 dst->exit_int_info = from->exit_int_info;
3002 dst->exit_int_info_err = from->exit_int_info_err;
3003 dst->nested_ctl = from->nested_ctl;
3004 dst->event_inj = from->event_inj;
3005 dst->event_inj_err = from->event_inj_err;
3006 dst->nested_cr3 = from->nested_cr3;
0dc92119 3007 dst->virt_ext = from->virt_ext;
0460a979
JR
3008}
3009
34f80cfa 3010static int nested_svm_vmexit(struct vcpu_svm *svm)
cf74a78b 3011{
34f80cfa 3012 struct vmcb *nested_vmcb;
e6aa9abd 3013 struct vmcb *hsave = svm->nested.hsave;
33740e40 3014 struct vmcb *vmcb = svm->vmcb;
7597f129 3015 struct page *page;
cf74a78b 3016
17897f36
JR
3017 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3018 vmcb->control.exit_info_1,
3019 vmcb->control.exit_info_2,
3020 vmcb->control.exit_int_info,
e097e5ff
SH
3021 vmcb->control.exit_int_info_err,
3022 KVM_ISA_SVM);
17897f36 3023
7597f129 3024 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
34f80cfa
JR
3025 if (!nested_vmcb)
3026 return 1;
3027
2030753d
JR
3028 /* Exit Guest-Mode */
3029 leave_guest_mode(&svm->vcpu);
06fc7772
JR
3030 svm->nested.vmcb = 0;
3031
cf74a78b 3032 /* Give the current vmcb to the guest */
33740e40
JR
3033 disable_gif(svm);
3034
3035 nested_vmcb->save.es = vmcb->save.es;
3036 nested_vmcb->save.cs = vmcb->save.cs;
3037 nested_vmcb->save.ss = vmcb->save.ss;
3038 nested_vmcb->save.ds = vmcb->save.ds;
3039 nested_vmcb->save.gdtr = vmcb->save.gdtr;
3040 nested_vmcb->save.idtr = vmcb->save.idtr;
3f6a9d16 3041 nested_vmcb->save.efer = svm->vcpu.arch.efer;
cdbbdc12 3042 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
9f8fe504 3043 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
33740e40 3044 nested_vmcb->save.cr2 = vmcb->save.cr2;
cdbbdc12 3045 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
f6e78475 3046 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
33740e40
JR
3047 nested_vmcb->save.rip = vmcb->save.rip;
3048 nested_vmcb->save.rsp = vmcb->save.rsp;
3049 nested_vmcb->save.rax = vmcb->save.rax;
3050 nested_vmcb->save.dr7 = vmcb->save.dr7;
3051 nested_vmcb->save.dr6 = vmcb->save.dr6;
3052 nested_vmcb->save.cpl = vmcb->save.cpl;
3053
3054 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
3055 nested_vmcb->control.int_vector = vmcb->control.int_vector;
3056 nested_vmcb->control.int_state = vmcb->control.int_state;
3057 nested_vmcb->control.exit_code = vmcb->control.exit_code;
3058 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
3059 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
3060 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
3061 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
3062 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
6092d3d3
JR
3063
3064 if (svm->nrips_enabled)
3065 nested_vmcb->control.next_rip = vmcb->control.next_rip;
8d23c466
AG
3066
3067 /*
3068 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3069 * to make sure that we do not lose injected events. So check event_inj
3070 * here and copy it to exit_int_info if it is valid.
3071 * Exit_int_info and event_inj can't be both valid because the case
3072 * below only happens on a VMRUN instruction intercept which has
3073 * no valid exit_int_info set.
3074 */
3075 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3076 struct vmcb_control_area *nc = &nested_vmcb->control;
3077
3078 nc->exit_int_info = vmcb->control.event_inj;
3079 nc->exit_int_info_err = vmcb->control.event_inj_err;
3080 }
3081
33740e40
JR
3082 nested_vmcb->control.tlb_ctl = 0;
3083 nested_vmcb->control.event_inj = 0;
3084 nested_vmcb->control.event_inj_err = 0;
cf74a78b
AG
3085
3086 /* We always set V_INTR_MASKING and remember the old value in hflags */
3087 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3088 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3089
cf74a78b 3090 /* Restore the original control entries */
0460a979 3091 copy_vmcb_control_area(vmcb, hsave);
cf74a78b 3092
219b65dc
AG
3093 kvm_clear_exception_queue(&svm->vcpu);
3094 kvm_clear_interrupt_queue(&svm->vcpu);
cf74a78b 3095
4b16184c
JR
3096 svm->nested.nested_cr3 = 0;
3097
cf74a78b
AG
3098 /* Restore selected save entries */
3099 svm->vmcb->save.es = hsave->save.es;
3100 svm->vmcb->save.cs = hsave->save.cs;
3101 svm->vmcb->save.ss = hsave->save.ss;
3102 svm->vmcb->save.ds = hsave->save.ds;
3103 svm->vmcb->save.gdtr = hsave->save.gdtr;
3104 svm->vmcb->save.idtr = hsave->save.idtr;
f6e78475 3105 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
cf74a78b
AG
3106 svm_set_efer(&svm->vcpu, hsave->save.efer);
3107 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3108 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3109 if (npt_enabled) {
3110 svm->vmcb->save.cr3 = hsave->save.cr3;
3111 svm->vcpu.arch.cr3 = hsave->save.cr3;
3112 } else {
2390218b 3113 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
cf74a78b
AG
3114 }
3115 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3116 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3117 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3118 svm->vmcb->save.dr7 = 0;
3119 svm->vmcb->save.cpl = 0;
3120 svm->vmcb->control.exit_int_info = 0;
3121
8d28fec4
RJ
3122 mark_all_dirty(svm->vmcb);
3123
7597f129 3124 nested_svm_unmap(page);
cf74a78b 3125
4b16184c 3126 nested_svm_uninit_mmu_context(&svm->vcpu);
cf74a78b
AG
3127 kvm_mmu_reset_context(&svm->vcpu);
3128 kvm_mmu_load(&svm->vcpu);
3129
3130 return 0;
3131}
3d6368ef 3132
9738b2c9 3133static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
3d6368ef 3134{
323c3d80
JR
3135 /*
3136 * This function merges the msr permission bitmaps of kvm and the
c5ec2e56 3137 * nested vmcb. It is optimized in that it only merges the parts where
323c3d80
JR
3138 * the kvm msr permission bitmap may contain zero bits
3139 */
3d6368ef 3140 int i;
9738b2c9 3141
323c3d80
JR
3142 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3143 return true;
9738b2c9 3144
323c3d80
JR
3145 for (i = 0; i < MSRPM_OFFSETS; i++) {
3146 u32 value, p;
3147 u64 offset;
9738b2c9 3148
323c3d80
JR
3149 if (msrpm_offsets[i] == 0xffffffff)
3150 break;
3d6368ef 3151
0d6b3537
JR
3152 p = msrpm_offsets[i];
3153 offset = svm->nested.vmcb_msrpm + (p * 4);
323c3d80 3154
54bf36aa 3155 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
323c3d80
JR
3156 return false;
3157
3158 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3159 }
3d6368ef 3160
d0ec49d4 3161 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
9738b2c9
JR
3162
3163 return true;
3d6368ef
AG
3164}
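/*
 * Illustrative sketch (not part of svm.c) of the merge rule applied
 * above: a set bit in an MSR permission map means "intercept", so
 * OR-ing the host map with the nested guest's map yields a map that
 * exits whenever either side asked for it.
 */
#include <stdint.h>

static uint32_t sketch_merge_msrpm_word(uint32_t host_word, uint32_t nested_word)
{
	return host_word | nested_word;		/* intercept if KVM or L1 wants it */
}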
3165
52c65a30
JR
3166static bool nested_vmcb_checks(struct vmcb *vmcb)
3167{
3168 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3169 return false;
3170
dbe77584
JR
3171 if (vmcb->control.asid == 0)
3172 return false;
3173
cea3a19b
TL
3174 if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3175 !npt_enabled)
4b16184c
JR
3176 return false;
3177
52c65a30
JR
3178 return true;
3179}
3180
c2634065
LP
3181static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3182 struct vmcb *nested_vmcb, struct page *page)
3d6368ef 3183{
f6e78475 3184 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
3d6368ef
AG
3185 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3186 else
3187 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3188
cea3a19b 3189 if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
4b16184c
JR
3190 kvm_mmu_unload(&svm->vcpu);
3191 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3192 nested_svm_init_mmu_context(&svm->vcpu);
3193 }
3194
3d6368ef
AG
3195 /* Load the nested guest state */
3196 svm->vmcb->save.es = nested_vmcb->save.es;
3197 svm->vmcb->save.cs = nested_vmcb->save.cs;
3198 svm->vmcb->save.ss = nested_vmcb->save.ss;
3199 svm->vmcb->save.ds = nested_vmcb->save.ds;
3200 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3201 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
f6e78475 3202 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
3d6368ef
AG
3203 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3204 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3205 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3206 if (npt_enabled) {
3207 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3208 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
0e5cbe36 3209 } else
2390218b 3210 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
0e5cbe36
JR
3211
3212 /* Guest paging mode is active - reset mmu */
3213 kvm_mmu_reset_context(&svm->vcpu);
3214
defbba56 3215 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3d6368ef
AG
3216 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3217 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3218 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
e0231715 3219
3d6368ef
AG
3220 /* In case we don't even reach vcpu_run, the fields are not updated */
3221 svm->vmcb->save.rax = nested_vmcb->save.rax;
3222 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3223 svm->vmcb->save.rip = nested_vmcb->save.rip;
3224 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3225 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3226 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3227
f7138538 3228 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
ce2ac085 3229 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
3d6368ef 3230
aad42c64 3231 /* cache intercepts */
4ee546b4 3232 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
3aed041a 3233 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
aad42c64
JR
3234 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3235 svm->nested.intercept = nested_vmcb->control.intercept;
3236
f40f6a45 3237 svm_flush_tlb(&svm->vcpu);
3d6368ef 3238 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3d6368ef
AG
3239 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3240 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3241 else
3242 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3243
88ab24ad
JR
3244 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3245 /* We only want the cr8 intercept bits of the guest */
4ee546b4
RJ
3246 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3247 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
88ab24ad
JR
3248 }
3249
0d945bd9 3250 /* We don't want to see VMMCALLs from a nested guest */
8a05a1b8 3251 clr_intercept(svm, INTERCEPT_VMMCALL);
0d945bd9 3252
0dc92119 3253 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3d6368ef
AG
3254 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3255 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3256 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
3d6368ef
AG
3257 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3258 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3259
7597f129 3260 nested_svm_unmap(page);
9738b2c9 3261
2030753d
JR
3262 /* Enter Guest-Mode */
3263 enter_guest_mode(&svm->vcpu);
3264
384c6368
JR
3265 /*
3266 * Merge guest and host intercepts - must be called with vcpu in
3267 * guest-mode to take effect here
3268 */
3269 recalc_intercepts(svm);
3270
06fc7772 3271 svm->nested.vmcb = vmcb_gpa;
9738b2c9 3272
2af9194d 3273 enable_gif(svm);
3d6368ef 3274
8d28fec4 3275 mark_all_dirty(svm->vmcb);
c2634065
LP
3276}
3277
3278static bool nested_svm_vmrun(struct vcpu_svm *svm)
3279{
3280 struct vmcb *nested_vmcb;
3281 struct vmcb *hsave = svm->nested.hsave;
3282 struct vmcb *vmcb = svm->vmcb;
3283 struct page *page;
3284 u64 vmcb_gpa;
3285
3286 vmcb_gpa = svm->vmcb->save.rax;
3287
3288 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3289 if (!nested_vmcb)
3290 return false;
3291
3292 if (!nested_vmcb_checks(nested_vmcb)) {
3293 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
3294 nested_vmcb->control.exit_code_hi = 0;
3295 nested_vmcb->control.exit_info_1 = 0;
3296 nested_vmcb->control.exit_info_2 = 0;
3297
3298 nested_svm_unmap(page);
3299
3300 return false;
3301 }
3302
3303 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3304 nested_vmcb->save.rip,
3305 nested_vmcb->control.int_ctl,
3306 nested_vmcb->control.event_inj,
3307 nested_vmcb->control.nested_ctl);
3308
3309 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3310 nested_vmcb->control.intercept_cr >> 16,
3311 nested_vmcb->control.intercept_exceptions,
3312 nested_vmcb->control.intercept);
3313
3314 /* Clear internal status */
3315 kvm_clear_exception_queue(&svm->vcpu);
3316 kvm_clear_interrupt_queue(&svm->vcpu);
3317
3318 /*
3319 * Save the old vmcb, so we don't need to pick what we save, but can
3320 * restore everything when a VMEXIT occurs
3321 */
3322 hsave->save.es = vmcb->save.es;
3323 hsave->save.cs = vmcb->save.cs;
3324 hsave->save.ss = vmcb->save.ss;
3325 hsave->save.ds = vmcb->save.ds;
3326 hsave->save.gdtr = vmcb->save.gdtr;
3327 hsave->save.idtr = vmcb->save.idtr;
3328 hsave->save.efer = svm->vcpu.arch.efer;
3329 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
3330 hsave->save.cr4 = svm->vcpu.arch.cr4;
3331 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3332 hsave->save.rip = kvm_rip_read(&svm->vcpu);
3333 hsave->save.rsp = vmcb->save.rsp;
3334 hsave->save.rax = vmcb->save.rax;
3335 if (npt_enabled)
3336 hsave->save.cr3 = vmcb->save.cr3;
3337 else
3338 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
3339
3340 copy_vmcb_control_area(hsave, vmcb);
3341
3342 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
8d28fec4 3343
9738b2c9 3344 return true;
3d6368ef
AG
3345}
3346
9966bf68 3347static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
5542675b
AG
3348{
3349 to_vmcb->save.fs = from_vmcb->save.fs;
3350 to_vmcb->save.gs = from_vmcb->save.gs;
3351 to_vmcb->save.tr = from_vmcb->save.tr;
3352 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3353 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3354 to_vmcb->save.star = from_vmcb->save.star;
3355 to_vmcb->save.lstar = from_vmcb->save.lstar;
3356 to_vmcb->save.cstar = from_vmcb->save.cstar;
3357 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3358 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3359 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3360 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
5542675b
AG
3361}
3362
851ba692 3363static int vmload_interception(struct vcpu_svm *svm)
5542675b 3364{
9966bf68 3365 struct vmcb *nested_vmcb;
7597f129 3366 struct page *page;
b742c1e6 3367 int ret;
9966bf68 3368
5542675b
AG
3369 if (nested_svm_check_permissions(svm))
3370 return 1;
3371
7597f129 3372 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
9966bf68
JR
3373 if (!nested_vmcb)
3374 return 1;
3375
e3e9ed3d 3376 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3377 ret = kvm_skip_emulated_instruction(&svm->vcpu);
e3e9ed3d 3378
9966bf68 3379 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
7597f129 3380 nested_svm_unmap(page);
5542675b 3381
b742c1e6 3382 return ret;
5542675b
AG
3383}
3384
851ba692 3385static int vmsave_interception(struct vcpu_svm *svm)
5542675b 3386{
9966bf68 3387 struct vmcb *nested_vmcb;
7597f129 3388 struct page *page;
b742c1e6 3389 int ret;
9966bf68 3390
5542675b
AG
3391 if (nested_svm_check_permissions(svm))
3392 return 1;
3393
7597f129 3394 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
9966bf68
JR
3395 if (!nested_vmcb)
3396 return 1;
3397
e3e9ed3d 3398 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3399 ret = kvm_skip_emulated_instruction(&svm->vcpu);
e3e9ed3d 3400
9966bf68 3401 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
7597f129 3402 nested_svm_unmap(page);
5542675b 3403
b742c1e6 3404 return ret;
5542675b
AG
3405}
3406
851ba692 3407static int vmrun_interception(struct vcpu_svm *svm)
3d6368ef 3408{
3d6368ef
AG
3409 if (nested_svm_check_permissions(svm))
3410 return 1;
3411
b75f4eb3
RJ
3412 /* Save rip after vmrun instruction */
3413 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
3d6368ef 3414
9738b2c9 3415 if (!nested_svm_vmrun(svm))
3d6368ef
AG
3416 return 1;
3417
9738b2c9 3418 if (!nested_svm_vmrun_msrpm(svm))
1f8da478
JR
3419 goto failed;
3420
3421 return 1;
3422
3423failed:
3424
3425 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3426 svm->vmcb->control.exit_code_hi = 0;
3427 svm->vmcb->control.exit_info_1 = 0;
3428 svm->vmcb->control.exit_info_2 = 0;
3429
3430 nested_svm_vmexit(svm);
3d6368ef
AG
3431
3432 return 1;
3433}
3434
851ba692 3435static int stgi_interception(struct vcpu_svm *svm)
1371d904 3436{
b742c1e6
LP
3437 int ret;
3438
1371d904
AG
3439 if (nested_svm_check_permissions(svm))
3440 return 1;
3441
640bd6e5
JN
3442 /*
3443 * If VGIF is enabled, the STGI intercept is only added to
cc3d967f 3444 * detect the opening of the SMI/NMI window; remove it now.
640bd6e5
JN
3445 */
3446 if (vgif_enabled(svm))
3447 clr_intercept(svm, INTERCEPT_STGI);
3448
1371d904 3449 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3450 ret = kvm_skip_emulated_instruction(&svm->vcpu);
3842d135 3451 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
1371d904 3452
2af9194d 3453 enable_gif(svm);
1371d904 3454
b742c1e6 3455 return ret;
1371d904
AG
3456}
3457
851ba692 3458static int clgi_interception(struct vcpu_svm *svm)
1371d904 3459{
b742c1e6
LP
3460 int ret;
3461
1371d904
AG
3462 if (nested_svm_check_permissions(svm))
3463 return 1;
3464
3465 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3466 ret = kvm_skip_emulated_instruction(&svm->vcpu);
1371d904 3467
2af9194d 3468 disable_gif(svm);
1371d904
AG
3469
3470 /* After a CLGI no interrupts should come */
340d3bc3
SS
3471 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3472 svm_clear_vintr(svm);
3473 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3474 mark_dirty(svm->vmcb, VMCB_INTR);
3475 }
decdbf6a 3476
b742c1e6 3477 return ret;
1371d904
AG
3478}
3479
851ba692 3480static int invlpga_interception(struct vcpu_svm *svm)
ff092385
AG
3481{
3482 struct kvm_vcpu *vcpu = &svm->vcpu;
ff092385 3483
668f198f
DK
3484 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3485 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
ec1ff790 3486
ff092385 3487 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
668f198f 3488 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
ff092385
AG
3489
3490 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3491 return kvm_skip_emulated_instruction(&svm->vcpu);
ff092385
AG
3492}
3493
532a46b9
JR
3494static int skinit_interception(struct vcpu_svm *svm)
3495{
668f198f 3496 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
532a46b9
JR
3497
3498 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3499 return 1;
3500}
3501
dab429a7
DK
3502static int wbinvd_interception(struct vcpu_svm *svm)
3503{
6affcbed 3504 return kvm_emulate_wbinvd(&svm->vcpu);
dab429a7
DK
3505}
3506
81dd35d4
JR
3507static int xsetbv_interception(struct vcpu_svm *svm)
3508{
3509 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3510 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3511
3512 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3513 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
b742c1e6 3514 return kvm_skip_emulated_instruction(&svm->vcpu);
81dd35d4
JR
3515 }
3516
3517 return 1;
3518}
3519
851ba692 3520static int task_switch_interception(struct vcpu_svm *svm)
6aa8b732 3521{
37817f29 3522 u16 tss_selector;
64a7ec06
GN
3523 int reason;
3524 int int_type = svm->vmcb->control.exit_int_info &
3525 SVM_EXITINTINFO_TYPE_MASK;
8317c298 3526 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
fe8e7f83
GN
3527 uint32_t type =
3528 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3529 uint32_t idt_v =
3530 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
e269fb21
JK
3531 bool has_error_code = false;
3532 u32 error_code = 0;
37817f29
IE
3533
3534 tss_selector = (u16)svm->vmcb->control.exit_info_1;
64a7ec06 3535
37817f29
IE
3536 if (svm->vmcb->control.exit_info_2 &
3537 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
64a7ec06
GN
3538 reason = TASK_SWITCH_IRET;
3539 else if (svm->vmcb->control.exit_info_2 &
3540 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3541 reason = TASK_SWITCH_JMP;
fe8e7f83 3542 else if (idt_v)
64a7ec06
GN
3543 reason = TASK_SWITCH_GATE;
3544 else
3545 reason = TASK_SWITCH_CALL;
3546
fe8e7f83
GN
3547 if (reason == TASK_SWITCH_GATE) {
3548 switch (type) {
3549 case SVM_EXITINTINFO_TYPE_NMI:
3550 svm->vcpu.arch.nmi_injected = false;
3551 break;
3552 case SVM_EXITINTINFO_TYPE_EXEPT:
e269fb21
JK
3553 if (svm->vmcb->control.exit_info_2 &
3554 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3555 has_error_code = true;
3556 error_code =
3557 (u32)svm->vmcb->control.exit_info_2;
3558 }
fe8e7f83
GN
3559 kvm_clear_exception_queue(&svm->vcpu);
3560 break;
3561 case SVM_EXITINTINFO_TYPE_INTR:
3562 kvm_clear_interrupt_queue(&svm->vcpu);
3563 break;
3564 default:
3565 break;
3566 }
3567 }
64a7ec06 3568
8317c298
GN
3569 if (reason != TASK_SWITCH_GATE ||
3570 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3571 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
f629cf84
GN
3572 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3573 skip_emulated_instruction(&svm->vcpu);
64a7ec06 3574
7f3d35fd
KW
3575 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3576 int_vec = -1;
3577
3578 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
acb54517
GN
3579 has_error_code, error_code) == EMULATE_FAIL) {
3580 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3581 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3582 svm->vcpu.run->internal.ndata = 0;
3583 return 0;
3584 }
3585 return 1;
6aa8b732
AK
3586}
3587
851ba692 3588static int cpuid_interception(struct vcpu_svm *svm)
6aa8b732 3589{
5fdbf976 3590 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
6a908b62 3591 return kvm_emulate_cpuid(&svm->vcpu);
6aa8b732
AK
3592}
3593
851ba692 3594static int iret_interception(struct vcpu_svm *svm)
95ba8273
GN
3595{
3596 ++svm->vcpu.stat.nmi_window_exits;
8a05a1b8 3597 clr_intercept(svm, INTERCEPT_IRET);
44c11430 3598 svm->vcpu.arch.hflags |= HF_IRET_MASK;
bd3d1ec3 3599 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
f303b4ce 3600 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
95ba8273
GN
3601 return 1;
3602}
3603
851ba692 3604static int invlpg_interception(struct vcpu_svm *svm)
a7052897 3605{
df4f3108
AP
3606 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3607 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3608
3609 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
b742c1e6 3610 return kvm_skip_emulated_instruction(&svm->vcpu);
a7052897
MT
3611}
3612
851ba692 3613static int emulate_on_interception(struct vcpu_svm *svm)
6aa8b732 3614{
51d8b661 3615 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
6aa8b732
AK
3616}
3617
332b56e4
AK
3618static int rdpmc_interception(struct vcpu_svm *svm)
3619{
3620 int err;
3621
3622 if (!static_cpu_has(X86_FEATURE_NRIPS))
3623 return emulate_on_interception(svm);
3624
3625 err = kvm_rdpmc(&svm->vcpu);
6affcbed 3626 return kvm_complete_insn_gp(&svm->vcpu, err);
332b56e4
AK
3627}
3628
52eb5a6d
XL
3629static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3630 unsigned long val)
628afd2a
JR
3631{
3632 unsigned long cr0 = svm->vcpu.arch.cr0;
3633 bool ret = false;
3634 u64 intercept;
3635
3636 intercept = svm->nested.intercept;
3637
3638 if (!is_guest_mode(&svm->vcpu) ||
3639 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3640 return false;
3641
3642 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3643 val &= ~SVM_CR0_SELECTIVE_MASK;
3644
3645 if (cr0 ^ val) {
3646 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3647 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3648 }
3649
3650 return ret;
3651}
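/*
 * Illustrative sketch, not part of this file: how the selective CR0
 * check above behaves.  It assumes SVM_CR0_SELECTIVE_MASK covers only
 * CR0.TS and CR0.MP (as defined earlier in this file), so a guest write
 * that changes nothing but those lazy-FPU bits is filtered out, while a
 * change to any other CR0 bit is offered to the nested hypervisor as an
 * SVM_EXIT_CR0_SEL_WRITE.  sel_cr0_write_needs_exit() is a name made up
 * for this sketch.
 */
static bool sel_cr0_write_needs_exit(unsigned long old_cr0, unsigned long new_cr0)
{
	old_cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
	new_cr0 &= ~(X86_CR0_TS | X86_CR0_MP);

	/* e.g. toggling only CR0.TS -> false; toggling CR0.CD -> true */
	return (old_cr0 ^ new_cr0) != 0;
}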
3652
7ff76d58
AP
3653#define CR_VALID (1ULL << 63)
3654
3655static int cr_interception(struct vcpu_svm *svm)
3656{
3657 int reg, cr;
3658 unsigned long val;
3659 int err;
3660
3661 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3662 return emulate_on_interception(svm);
3663
3664 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3665 return emulate_on_interception(svm);
3666
3667 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
5e57518d
DK
3668 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3669 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3670 else
3671 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
7ff76d58
AP
3672
3673 err = 0;
3674 if (cr >= 16) { /* mov to cr */
3675 cr -= 16;
3676 val = kvm_register_read(&svm->vcpu, reg);
3677 switch (cr) {
3678 case 0:
628afd2a
JR
3679 if (!check_selective_cr0_intercepted(svm, val))
3680 err = kvm_set_cr0(&svm->vcpu, val);
977b2d03
JR
3681 else
3682 return 1;
3683
7ff76d58
AP
3684 break;
3685 case 3:
3686 err = kvm_set_cr3(&svm->vcpu, val);
3687 break;
3688 case 4:
3689 err = kvm_set_cr4(&svm->vcpu, val);
3690 break;
3691 case 8:
3692 err = kvm_set_cr8(&svm->vcpu, val);
3693 break;
3694 default:
3695 WARN(1, "unhandled write to CR%d", cr);
3696 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3697 return 1;
3698 }
3699 } else { /* mov from cr */
3700 switch (cr) {
3701 case 0:
3702 val = kvm_read_cr0(&svm->vcpu);
3703 break;
3704 case 2:
3705 val = svm->vcpu.arch.cr2;
3706 break;
3707 case 3:
9f8fe504 3708 val = kvm_read_cr3(&svm->vcpu);
7ff76d58
AP
3709 break;
3710 case 4:
3711 val = kvm_read_cr4(&svm->vcpu);
3712 break;
3713 case 8:
3714 val = kvm_get_cr8(&svm->vcpu);
3715 break;
3716 default:
3717 WARN(1, "unhandled read from CR%d", cr);
3718 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3719 return 1;
3720 }
3721 kvm_register_write(&svm->vcpu, reg, val);
3722 }
6affcbed 3723 return kvm_complete_insn_gp(&svm->vcpu, err);
7ff76d58
AP
3724}
3725
cae3797a
AP
3726static int dr_interception(struct vcpu_svm *svm)
3727{
3728 int reg, dr;
3729 unsigned long val;
cae3797a 3730
facb0139
PB
3731 if (svm->vcpu.guest_debug == 0) {
3732 /*
3733 * No more DR vmexits; force a reload of the debug registers
3734 * and reenter on this instruction. The next vmexit will
3735 * retrieve the full state of the debug registers.
3736 */
3737 clr_dr_intercepts(svm);
3738 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
3739 return 1;
3740 }
3741
cae3797a
AP
3742 if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
3743 return emulate_on_interception(svm);
3744
3745 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3746 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
3747
3748 if (dr >= 16) { /* mov to DRn */
16f8a6f9
NA
3749 if (!kvm_require_dr(&svm->vcpu, dr - 16))
3750 return 1;
cae3797a
AP
3751 val = kvm_register_read(&svm->vcpu, reg);
3752 kvm_set_dr(&svm->vcpu, dr - 16, val);
3753 } else {
16f8a6f9
NA
3754 if (!kvm_require_dr(&svm->vcpu, dr))
3755 return 1;
3756 kvm_get_dr(&svm->vcpu, dr, &val);
3757 kvm_register_write(&svm->vcpu, reg, val);
cae3797a
AP
3758 }
3759
b742c1e6 3760 return kvm_skip_emulated_instruction(&svm->vcpu);
cae3797a
AP
3761}
3762
851ba692 3763static int cr8_write_interception(struct vcpu_svm *svm)
1d075434 3764{
851ba692 3765 struct kvm_run *kvm_run = svm->vcpu.run;
eea1cff9 3766 int r;
851ba692 3767
0a5fff19
GN
3768 u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
3769 /* instruction emulation calls kvm_set_cr8() */
7ff76d58 3770 r = cr_interception(svm);
35754c98 3771 if (lapic_in_kernel(&svm->vcpu))
7ff76d58 3772 return r;
0a5fff19 3773 if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
7ff76d58 3774 return r;
1d075434
JR
3775 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
3776 return 0;
3777}
3778
609e36d3 3779static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
6aa8b732 3780{
a2fa3e9f
GH
3781 struct vcpu_svm *svm = to_svm(vcpu);
3782
609e36d3 3783 switch (msr_info->index) {
af24a4e4 3784 case MSR_IA32_TSC: {
609e36d3 3785 msr_info->data = svm->vmcb->control.tsc_offset +
35181e86 3786 kvm_scale_tsc(vcpu, rdtsc());
fbc0db76 3787
6aa8b732
AK
3788 break;
3789 }
8c06585d 3790 case MSR_STAR:
609e36d3 3791 msr_info->data = svm->vmcb->save.star;
6aa8b732 3792 break;
0e859cac 3793#ifdef CONFIG_X86_64
6aa8b732 3794 case MSR_LSTAR:
609e36d3 3795 msr_info->data = svm->vmcb->save.lstar;
6aa8b732
AK
3796 break;
3797 case MSR_CSTAR:
609e36d3 3798 msr_info->data = svm->vmcb->save.cstar;
6aa8b732
AK
3799 break;
3800 case MSR_KERNEL_GS_BASE:
609e36d3 3801 msr_info->data = svm->vmcb->save.kernel_gs_base;
6aa8b732
AK
3802 break;
3803 case MSR_SYSCALL_MASK:
609e36d3 3804 msr_info->data = svm->vmcb->save.sfmask;
6aa8b732
AK
3805 break;
3806#endif
3807 case MSR_IA32_SYSENTER_CS:
609e36d3 3808 msr_info->data = svm->vmcb->save.sysenter_cs;
6aa8b732
AK
3809 break;
3810 case MSR_IA32_SYSENTER_EIP:
609e36d3 3811 msr_info->data = svm->sysenter_eip;
6aa8b732
AK
3812 break;
3813 case MSR_IA32_SYSENTER_ESP:
609e36d3 3814 msr_info->data = svm->sysenter_esp;
6aa8b732 3815 break;
46896c73
PB
3816 case MSR_TSC_AUX:
3817 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3818 return 1;
3819 msr_info->data = svm->tsc_aux;
3820 break;
e0231715
JR
3821 /*
3822 * Nobody will change the following 5 values in the VMCB so we can
3823 * safely return them on rdmsr. They will always be 0 until LBRV is
3824 * implemented.
3825 */
a2938c80 3826 case MSR_IA32_DEBUGCTLMSR:
609e36d3 3827 msr_info->data = svm->vmcb->save.dbgctl;
a2938c80
JR
3828 break;
3829 case MSR_IA32_LASTBRANCHFROMIP:
609e36d3 3830 msr_info->data = svm->vmcb->save.br_from;
a2938c80
JR
3831 break;
3832 case MSR_IA32_LASTBRANCHTOIP:
609e36d3 3833 msr_info->data = svm->vmcb->save.br_to;
a2938c80
JR
3834 break;
3835 case MSR_IA32_LASTINTFROMIP:
609e36d3 3836 msr_info->data = svm->vmcb->save.last_excp_from;
a2938c80
JR
3837 break;
3838 case MSR_IA32_LASTINTTOIP:
609e36d3 3839 msr_info->data = svm->vmcb->save.last_excp_to;
a2938c80 3840 break;
b286d5d8 3841 case MSR_VM_HSAVE_PA:
609e36d3 3842 msr_info->data = svm->nested.hsave_msr;
b286d5d8 3843 break;
eb6f302e 3844 case MSR_VM_CR:
609e36d3 3845 msr_info->data = svm->nested.vm_cr_msr;
eb6f302e 3846 break;
c8a73f18 3847 case MSR_IA32_UCODE_REV:
609e36d3 3848 msr_info->data = 0x01000065;
c8a73f18 3849 break;
ae8b7875
BP
3850 case MSR_F15H_IC_CFG: {
3851
3852 int family, model;
3853
3854 family = guest_cpuid_family(vcpu);
3855 model = guest_cpuid_model(vcpu);
3856
3857 if (family < 0 || model < 0)
3858 return kvm_get_msr_common(vcpu, msr_info);
3859
3860 msr_info->data = 0;
3861
3862 if (family == 0x15 &&
3863 (model >= 0x2 && model < 0x20))
3864 msr_info->data = 0x1E;
3865 }
3866 break;
6aa8b732 3867 default:
609e36d3 3868 return kvm_get_msr_common(vcpu, msr_info);
6aa8b732
AK
3869 }
3870 return 0;
3871}
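/*
 * Illustrative sketch, not part of this file: the value returned for
 * MSR_IA32_TSC above is the scaled host TSC plus the VMCB tsc_offset.
 * The helper below spells that out, assuming the AMD TSC ratio is an
 * 8.32 fixed-point multiplier (1.0 == 1ULL << 32), which is the detail
 * kvm_scale_tsc() hides in the real code.  guest_tsc_sketch() is a name
 * made up for this sketch.
 */
static u64 guest_tsc_sketch(u64 host_tsc, u64 tsc_ratio, u64 tsc_offset)
{
	u64 scaled = (u64)(((unsigned __int128)host_tsc * tsc_ratio) >> 32);

	/* With ratio 1.0 and tsc_offset = -1000, host TSC 5000 reads as 4000. */
	return tsc_offset + scaled;
}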
3872
851ba692 3873static int rdmsr_interception(struct vcpu_svm *svm)
6aa8b732 3874{
668f198f 3875 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
609e36d3 3876 struct msr_data msr_info;
6aa8b732 3877
609e36d3
PB
3878 msr_info.index = ecx;
3879 msr_info.host_initiated = false;
3880 if (svm_get_msr(&svm->vcpu, &msr_info)) {
59200273 3881 trace_kvm_msr_read_ex(ecx);
c1a5d4f9 3882 kvm_inject_gp(&svm->vcpu, 0);
b742c1e6 3883 return 1;
59200273 3884 } else {
609e36d3 3885 trace_kvm_msr_read(ecx, msr_info.data);
af9ca2d7 3886
609e36d3
PB
3887 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
3888 msr_info.data & 0xffffffff);
3889 kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
3890 msr_info.data >> 32);
5fdbf976 3891 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
b742c1e6 3892 return kvm_skip_emulated_instruction(&svm->vcpu);
6aa8b732 3893 }
6aa8b732
AK
3894}
3895
4a810181
JR
3896static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
3897{
3898 struct vcpu_svm *svm = to_svm(vcpu);
3899 int svm_dis, chg_mask;
3900
3901 if (data & ~SVM_VM_CR_VALID_MASK)
3902 return 1;
3903
3904 chg_mask = SVM_VM_CR_VALID_MASK;
3905
3906 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
3907 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
3908
3909 svm->nested.vm_cr_msr &= ~chg_mask;
3910 svm->nested.vm_cr_msr |= (data & chg_mask);
3911
3912 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
3913
3914 /* check for svm_disable while efer.svme is set */
3915 if (svm_dis && (vcpu->arch.efer & EFER_SVME))
3916 return 1;
3917
3918 return 0;
3919}
3920
8fe8ab46 3921static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
6aa8b732 3922{
a2fa3e9f
GH
3923 struct vcpu_svm *svm = to_svm(vcpu);
3924
8fe8ab46
WA
3925 u32 ecx = msr->index;
3926 u64 data = msr->data;
6aa8b732 3927 switch (ecx) {
15038e14
PB
3928 case MSR_IA32_CR_PAT:
3929 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
3930 return 1;
3931 vcpu->arch.pat = data;
3932 svm->vmcb->save.g_pat = data;
3933 mark_dirty(svm->vmcb, VMCB_NPT);
3934 break;
f4e1b3c8 3935 case MSR_IA32_TSC:
8fe8ab46 3936 kvm_write_tsc(vcpu, msr);
6aa8b732 3937 break;
8c06585d 3938 case MSR_STAR:
a2fa3e9f 3939 svm->vmcb->save.star = data;
6aa8b732 3940 break;
49b14f24 3941#ifdef CONFIG_X86_64
6aa8b732 3942 case MSR_LSTAR:
a2fa3e9f 3943 svm->vmcb->save.lstar = data;
6aa8b732
AK
3944 break;
3945 case MSR_CSTAR:
a2fa3e9f 3946 svm->vmcb->save.cstar = data;
6aa8b732
AK
3947 break;
3948 case MSR_KERNEL_GS_BASE:
a2fa3e9f 3949 svm->vmcb->save.kernel_gs_base = data;
6aa8b732
AK
3950 break;
3951 case MSR_SYSCALL_MASK:
a2fa3e9f 3952 svm->vmcb->save.sfmask = data;
6aa8b732
AK
3953 break;
3954#endif
3955 case MSR_IA32_SYSENTER_CS:
a2fa3e9f 3956 svm->vmcb->save.sysenter_cs = data;
6aa8b732
AK
3957 break;
3958 case MSR_IA32_SYSENTER_EIP:
017cb99e 3959 svm->sysenter_eip = data;
a2fa3e9f 3960 svm->vmcb->save.sysenter_eip = data;
6aa8b732
AK
3961 break;
3962 case MSR_IA32_SYSENTER_ESP:
017cb99e 3963 svm->sysenter_esp = data;
a2fa3e9f 3964 svm->vmcb->save.sysenter_esp = data;
6aa8b732 3965 break;
46896c73
PB
3966 case MSR_TSC_AUX:
3967 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
3968 return 1;
3969
3970 /*
3971 * This is rare, so we update the MSR here instead of using
3972 * direct_access_msrs. Doing that would require a rdmsr in
3973 * svm_vcpu_put.
3974 */
3975 svm->tsc_aux = data;
3976 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
3977 break;
a2938c80 3978 case MSR_IA32_DEBUGCTLMSR:
2a6b20b8 3979 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
a737f256
CD
3980 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
3981 __func__, data);
24e09cbf
JR
3982 break;
3983 }
3984 if (data & DEBUGCTL_RESERVED_BITS)
3985 return 1;
3986
3987 svm->vmcb->save.dbgctl = data;
b53ba3f9 3988 mark_dirty(svm->vmcb, VMCB_LBR);
24e09cbf
JR
3989 if (data & (1ULL<<0))
3990 svm_enable_lbrv(svm);
3991 else
3992 svm_disable_lbrv(svm);
a2938c80 3993 break;
b286d5d8 3994 case MSR_VM_HSAVE_PA:
e6aa9abd 3995 svm->nested.hsave_msr = data;
62b9abaa 3996 break;
3c5d0a44 3997 case MSR_VM_CR:
4a810181 3998 return svm_set_vm_cr(vcpu, data);
3c5d0a44 3999 case MSR_VM_IGNNE:
a737f256 4000 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
3c5d0a44 4001 break;
44a95dae
SS
4002 case MSR_IA32_APICBASE:
4003 if (kvm_vcpu_apicv_active(vcpu))
4004 avic_update_vapic_bar(to_svm(vcpu), data);
4005 /* Fall through */
6aa8b732 4006 default:
8fe8ab46 4007 return kvm_set_msr_common(vcpu, msr);
6aa8b732
AK
4008 }
4009 return 0;
4010}
4011
851ba692 4012static int wrmsr_interception(struct vcpu_svm *svm)
6aa8b732 4013{
8fe8ab46 4014 struct msr_data msr;
668f198f
DK
4015 u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
4016 u64 data = kvm_read_edx_eax(&svm->vcpu);
af9ca2d7 4017
8fe8ab46
WA
4018 msr.data = data;
4019 msr.index = ecx;
4020 msr.host_initiated = false;
af9ca2d7 4021
5fdbf976 4022 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
854e8bb1 4023 if (kvm_set_msr(&svm->vcpu, &msr)) {
59200273 4024 trace_kvm_msr_write_ex(ecx, data);
c1a5d4f9 4025 kvm_inject_gp(&svm->vcpu, 0);
b742c1e6 4026 return 1;
59200273
AK
4027 } else {
4028 trace_kvm_msr_write(ecx, data);
b742c1e6 4029 return kvm_skip_emulated_instruction(&svm->vcpu);
59200273 4030 }
6aa8b732
AK
4031}
4032
851ba692 4033static int msr_interception(struct vcpu_svm *svm)
6aa8b732 4034{
e756fc62 4035 if (svm->vmcb->control.exit_info_1)
851ba692 4036 return wrmsr_interception(svm);
6aa8b732 4037 else
851ba692 4038 return rdmsr_interception(svm);
6aa8b732
AK
4039}
4040
851ba692 4041static int interrupt_window_interception(struct vcpu_svm *svm)
c1150d8c 4042{
3842d135 4043 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
f0b85051 4044 svm_clear_vintr(svm);
85f455f7 4045 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
decdbf6a 4046 mark_dirty(svm->vmcb, VMCB_INTR);
675acb75 4047 ++svm->vcpu.stat.irq_window_exits;
c1150d8c
DL
4048 return 1;
4049}
4050
565d0998
ML
4051static int pause_interception(struct vcpu_svm *svm)
4052{
de63ad4c
LM
4053 struct kvm_vcpu *vcpu = &svm->vcpu;
4054 bool in_kernel = (svm_get_cpl(vcpu) == 0);
4055
4056 kvm_vcpu_on_spin(vcpu, in_kernel);
565d0998
ML
4057 return 1;
4058}
4059
87c00572
GS
4060static int nop_interception(struct vcpu_svm *svm)
4061{
b742c1e6 4062 return kvm_skip_emulated_instruction(&(svm->vcpu));
87c00572
GS
4063}
4064
4065static int monitor_interception(struct vcpu_svm *svm)
4066{
4067 printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4068 return nop_interception(svm);
4069}
4070
4071static int mwait_interception(struct vcpu_svm *svm)
4072{
4073 printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4074 return nop_interception(svm);
4075}
4076
18f40c53
SS
4077enum avic_ipi_failure_cause {
4078 AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4079 AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4080 AVIC_IPI_FAILURE_INVALID_TARGET,
4081 AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4082};
4083
4084static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4085{
4086 u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4087 u32 icrl = svm->vmcb->control.exit_info_1;
4088 u32 id = svm->vmcb->control.exit_info_2 >> 32;
5446a979 4089 u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
18f40c53
SS
4090 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4091
4092 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4093
4094 switch (id) {
4095 case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4096 /*
4097 * AVIC hardware handles the generation of
4098 * IPIs when the specified Message Type is Fixed
4099 * (also known as fixed delivery mode) and
4100 * the Trigger Mode is edge-triggered. The hardware
4101 * also supports self and broadcast delivery modes
4102 * specified via the Destination Shorthand (DSH)
4103 * field of the ICRL. Logical and physical APIC ID
4104 * formats are supported. All other IPI types cause
4105 * a #VMEXIT, which needs to be emulated.
4106 */
4107 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4108 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4109 break;
4110 case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4111 int i;
4112 struct kvm_vcpu *vcpu;
4113 struct kvm *kvm = svm->vcpu.kvm;
4114 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4115
4116 /*
4117 * At this point, we expect that the AVIC HW has already
4118 * set the appropriate IRR bits on the valid target
4119 * vcpus. So, we just need to kick the appropriate vcpu.
4120 */
4121 kvm_for_each_vcpu(i, vcpu, kvm) {
4122 bool m = kvm_apic_match_dest(vcpu, apic,
4123 icrl & KVM_APIC_SHORT_MASK,
4124 GET_APIC_DEST_FIELD(icrh),
4125 icrl & KVM_APIC_DEST_MASK);
4126
4127 if (m && !avic_vcpu_is_running(vcpu))
4128 kvm_vcpu_wake_up(vcpu);
4129 }
4130 break;
4131 }
4132 case AVIC_IPI_FAILURE_INVALID_TARGET:
4133 break;
4134 case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4135 WARN_ONCE(1, "Invalid backing page\n");
4136 break;
4137 default:
4138 pr_err("Unknown IPI interception\n");
4139 }
4140
4141 return 1;
4142}
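/*
 * Illustrative sketch, not part of this file: the ICRL fields that
 * decide whether AVIC can deliver an IPI on its own, per the comment in
 * the INVALID_INT_TYPE case above.  The bit positions are the
 * architectural APIC ICR layout; avic_ipi_is_accelerated() is a name
 * made up for this sketch.
 */
static bool avic_ipi_is_accelerated(u32 icrl)
{
	u32 delivery_mode = (icrl >> 8) & 0x7;	/* 0 == Fixed */
	u32 trigger_mode  = (icrl >> 15) & 0x1;	/* 0 == edge */

	/*
	 * Fixed, edge-triggered IPIs (including the self/broadcast
	 * shorthands in bits 19:18) are handled by hardware; e.g. INIT
	 * (delivery mode 5) always causes this #VMEXIT instead.
	 */
	return delivery_mode == 0 && trigger_mode == 0;
}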
4143
4144static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4145{
4146 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4147 int index;
4148 u32 *logical_apic_id_table;
4149 int dlid = GET_APIC_LOGICAL_ID(ldr);
4150
4151 if (!dlid)
4152 return NULL;
4153
4154 if (flat) { /* flat */
4155 index = ffs(dlid) - 1;
4156 if (index > 7)
4157 return NULL;
4158 } else { /* cluster */
4159 int cluster = (dlid & 0xf0) >> 4;
4160 int apic = ffs(dlid & 0x0f) - 1;
4161
4162 if ((apic < 0) || (apic > 7) ||
4163 (cluster >= 0xf))
4164 return NULL;
4165 index = (cluster << 2) + apic;
4166 }
4167
4168 logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
4169
4170 return &logical_apic_id_table[index];
4171}
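/*
 * Illustrative sketch, not part of this file: worked examples for the
 * index computation above.  In flat mode the logical ID is one bit per
 * APIC, so DLID 0x08 maps to index 3.  In cluster mode the high nibble
 * is the cluster and the low nibble selects one of four APICs, so DLID
 * 0x21 maps to cluster 2, apic 0, i.e. index (2 << 2) + 0 = 8.
 * logical_id_index() is a name made up for this sketch.
 */
static int logical_id_index(u32 dlid, bool flat)
{
	if (flat) {
		int index = ffs(dlid) - 1;

		return index > 7 ? -1 : index;
	} else {
		int cluster = (dlid & 0xf0) >> 4;
		int apic = ffs(dlid & 0x0f) - 1;

		if (apic < 0 || apic > 7 || cluster >= 0xf)
			return -1;
		return (cluster << 2) + apic;
	}
}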
4172
4173static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
4174 bool valid)
4175{
4176 bool flat;
4177 u32 *entry, new_entry;
4178
4179 flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4180 entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4181 if (!entry)
4182 return -EINVAL;
4183
4184 new_entry = READ_ONCE(*entry);
4185 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4186 new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4187 if (valid)
4188 new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4189 else
4190 new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4191 WRITE_ONCE(*entry, new_entry);
4192
4193 return 0;
4194}
4195
4196static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4197{
4198 int ret;
4199 struct vcpu_svm *svm = to_svm(vcpu);
4200 u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4201
4202 if (!ldr)
4203 return 1;
4204
4205 ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
4206 if (ret && svm->ldr_reg) {
4207 avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
4208 svm->ldr_reg = 0;
4209 } else {
4210 svm->ldr_reg = ldr;
4211 }
4212 return ret;
4213}
4214
4215static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4216{
4217 u64 *old, *new;
4218 struct vcpu_svm *svm = to_svm(vcpu);
4219 u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
4220 u32 id = (apic_id_reg >> 24) & 0xff;
4221
4222 if (vcpu->vcpu_id == id)
4223 return 0;
4224
4225 old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4226 new = avic_get_physical_id_entry(vcpu, id);
4227 if (!new || !old)
4228 return 1;
4229
4230 /* We need to move the physical_id_entry to the new offset */
4231 *new = *old;
4232 *old = 0ULL;
4233 to_svm(vcpu)->avic_physical_id_cache = new;
4234
4235 /*
4236 * Also update the guest physical APIC ID in the logical
4237 * APIC ID table entry if the LDR has already been set up.
4238 */
4239 if (svm->ldr_reg)
4240 avic_handle_ldr_update(vcpu);
4241
4242 return 0;
4243}
4244
4245static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4246{
4247 struct vcpu_svm *svm = to_svm(vcpu);
4248 struct kvm_arch *vm_data = &vcpu->kvm->arch;
4249 u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4250 u32 mod = (dfr >> 28) & 0xf;
4251
4252 /*
4253 * We assume that all local APICs are using the same type.
4254 * If this changes, we need to flush the AVIC logical
4255 * APIC ID table.
4256 */
4257 if (vm_data->ldr_mode == mod)
4258 return 0;
4259
4260 clear_page(page_address(vm_data->avic_logical_id_table_page));
4261 vm_data->ldr_mode = mod;
4262
4263 if (svm->ldr_reg)
4264 avic_handle_ldr_update(vcpu);
4265 return 0;
4266}
4267
4268static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4269{
4270 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4271 u32 offset = svm->vmcb->control.exit_info_1 &
4272 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4273
4274 switch (offset) {
4275 case APIC_ID:
4276 if (avic_handle_apic_id_update(&svm->vcpu))
4277 return 0;
4278 break;
4279 case APIC_LDR:
4280 if (avic_handle_ldr_update(&svm->vcpu))
4281 return 0;
4282 break;
4283 case APIC_DFR:
4284 avic_handle_dfr_update(&svm->vcpu);
4285 break;
4286 default:
4287 break;
4288 }
4289
4290 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4291
4292 return 1;
4293}
4294
4295static bool is_avic_unaccelerated_access_trap(u32 offset)
4296{
4297 bool ret = false;
4298
4299 switch (offset) {
4300 case APIC_ID:
4301 case APIC_EOI:
4302 case APIC_RRR:
4303 case APIC_LDR:
4304 case APIC_DFR:
4305 case APIC_SPIV:
4306 case APIC_ESR:
4307 case APIC_ICR:
4308 case APIC_LVTT:
4309 case APIC_LVTTHMR:
4310 case APIC_LVTPC:
4311 case APIC_LVT0:
4312 case APIC_LVT1:
4313 case APIC_LVTERR:
4314 case APIC_TMICT:
4315 case APIC_TDCR:
4316 ret = true;
4317 break;
4318 default:
4319 break;
4320 }
4321 return ret;
4322}
4323
4324static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4325{
4326 int ret = 0;
4327 u32 offset = svm->vmcb->control.exit_info_1 &
4328 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4329 u32 vector = svm->vmcb->control.exit_info_2 &
4330 AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4331 bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4332 AVIC_UNACCEL_ACCESS_WRITE_MASK;
4333 bool trap = is_avic_unaccelerated_access_trap(offset);
4334
4335 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4336 trap, write, vector);
4337 if (trap) {
4338 /* Handling Trap */
4339 WARN_ONCE(!write, "svm: Handling trap read.\n");
4340 ret = avic_unaccel_trap_write(svm);
4341 } else {
4342 /* Handling Fault */
4343 ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
4344 }
4345
4346 return ret;
4347}
4348
09941fbb 4349static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
7ff76d58
AP
4350 [SVM_EXIT_READ_CR0] = cr_interception,
4351 [SVM_EXIT_READ_CR3] = cr_interception,
4352 [SVM_EXIT_READ_CR4] = cr_interception,
4353 [SVM_EXIT_READ_CR8] = cr_interception,
5e57518d 4354 [SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
628afd2a 4355 [SVM_EXIT_WRITE_CR0] = cr_interception,
7ff76d58
AP
4356 [SVM_EXIT_WRITE_CR3] = cr_interception,
4357 [SVM_EXIT_WRITE_CR4] = cr_interception,
e0231715 4358 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
cae3797a
AP
4359 [SVM_EXIT_READ_DR0] = dr_interception,
4360 [SVM_EXIT_READ_DR1] = dr_interception,
4361 [SVM_EXIT_READ_DR2] = dr_interception,
4362 [SVM_EXIT_READ_DR3] = dr_interception,
4363 [SVM_EXIT_READ_DR4] = dr_interception,
4364 [SVM_EXIT_READ_DR5] = dr_interception,
4365 [SVM_EXIT_READ_DR6] = dr_interception,
4366 [SVM_EXIT_READ_DR7] = dr_interception,
4367 [SVM_EXIT_WRITE_DR0] = dr_interception,
4368 [SVM_EXIT_WRITE_DR1] = dr_interception,
4369 [SVM_EXIT_WRITE_DR2] = dr_interception,
4370 [SVM_EXIT_WRITE_DR3] = dr_interception,
4371 [SVM_EXIT_WRITE_DR4] = dr_interception,
4372 [SVM_EXIT_WRITE_DR5] = dr_interception,
4373 [SVM_EXIT_WRITE_DR6] = dr_interception,
4374 [SVM_EXIT_WRITE_DR7] = dr_interception,
d0bfb940
JK
4375 [SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
4376 [SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
7aa81cc0 4377 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
e0231715 4378 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
e0231715 4379 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
54a20552 4380 [SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
e0231715 4381 [SVM_EXIT_INTR] = intr_interception,
c47f098d 4382 [SVM_EXIT_NMI] = nmi_interception,
6aa8b732
AK
4383 [SVM_EXIT_SMI] = nop_on_interception,
4384 [SVM_EXIT_INIT] = nop_on_interception,
c1150d8c 4385 [SVM_EXIT_VINTR] = interrupt_window_interception,
332b56e4 4386 [SVM_EXIT_RDPMC] = rdpmc_interception,
6aa8b732 4387 [SVM_EXIT_CPUID] = cpuid_interception,
95ba8273 4388 [SVM_EXIT_IRET] = iret_interception,
cf5a94d1 4389 [SVM_EXIT_INVD] = emulate_on_interception,
565d0998 4390 [SVM_EXIT_PAUSE] = pause_interception,
6aa8b732 4391 [SVM_EXIT_HLT] = halt_interception,
a7052897 4392 [SVM_EXIT_INVLPG] = invlpg_interception,
ff092385 4393 [SVM_EXIT_INVLPGA] = invlpga_interception,
e0231715 4394 [SVM_EXIT_IOIO] = io_interception,
6aa8b732
AK
4395 [SVM_EXIT_MSR] = msr_interception,
4396 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
46fe4ddd 4397 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
3d6368ef 4398 [SVM_EXIT_VMRUN] = vmrun_interception,
02e235bc 4399 [SVM_EXIT_VMMCALL] = vmmcall_interception,
5542675b
AG
4400 [SVM_EXIT_VMLOAD] = vmload_interception,
4401 [SVM_EXIT_VMSAVE] = vmsave_interception,
1371d904
AG
4402 [SVM_EXIT_STGI] = stgi_interception,
4403 [SVM_EXIT_CLGI] = clgi_interception,
532a46b9 4404 [SVM_EXIT_SKINIT] = skinit_interception,
dab429a7 4405 [SVM_EXIT_WBINVD] = wbinvd_interception,
87c00572
GS
4406 [SVM_EXIT_MONITOR] = monitor_interception,
4407 [SVM_EXIT_MWAIT] = mwait_interception,
81dd35d4 4408 [SVM_EXIT_XSETBV] = xsetbv_interception,
d0006530 4409 [SVM_EXIT_NPF] = npf_interception,
64d60670 4410 [SVM_EXIT_RSM] = emulate_on_interception,
18f40c53
SS
4411 [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
4412 [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
6aa8b732
AK
4413};
4414
ae8cc059 4415static void dump_vmcb(struct kvm_vcpu *vcpu)
3f10c846
JR
4416{
4417 struct vcpu_svm *svm = to_svm(vcpu);
4418 struct vmcb_control_area *control = &svm->vmcb->control;
4419 struct vmcb_save_area *save = &svm->vmcb->save;
4420
4421 pr_err("VMCB Control Area:\n");
ae8cc059
JP
4422 pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4423 pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4424 pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4425 pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4426 pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4427 pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4428 pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4429 pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4430 pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4431 pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4432 pr_err("%-20s%d\n", "asid:", control->asid);
4433 pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4434 pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4435 pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4436 pr_err("%-20s%08x\n", "int_state:", control->int_state);
4437 pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4438 pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4439 pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4440 pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4441 pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4442 pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4443 pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
44a95dae 4444 pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
ae8cc059
JP
4445 pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4446 pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
0dc92119 4447 pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
ae8cc059 4448 pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
44a95dae
SS
4449 pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4450 pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4451 pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3f10c846 4452 pr_err("VMCB State Save Area:\n");
ae8cc059
JP
4453 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4454 "es:",
4455 save->es.selector, save->es.attrib,
4456 save->es.limit, save->es.base);
4457 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4458 "cs:",
4459 save->cs.selector, save->cs.attrib,
4460 save->cs.limit, save->cs.base);
4461 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4462 "ss:",
4463 save->ss.selector, save->ss.attrib,
4464 save->ss.limit, save->ss.base);
4465 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4466 "ds:",
4467 save->ds.selector, save->ds.attrib,
4468 save->ds.limit, save->ds.base);
4469 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4470 "fs:",
4471 save->fs.selector, save->fs.attrib,
4472 save->fs.limit, save->fs.base);
4473 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4474 "gs:",
4475 save->gs.selector, save->gs.attrib,
4476 save->gs.limit, save->gs.base);
4477 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4478 "gdtr:",
4479 save->gdtr.selector, save->gdtr.attrib,
4480 save->gdtr.limit, save->gdtr.base);
4481 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4482 "ldtr:",
4483 save->ldtr.selector, save->ldtr.attrib,
4484 save->ldtr.limit, save->ldtr.base);
4485 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4486 "idtr:",
4487 save->idtr.selector, save->idtr.attrib,
4488 save->idtr.limit, save->idtr.base);
4489 pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4490 "tr:",
4491 save->tr.selector, save->tr.attrib,
4492 save->tr.limit, save->tr.base);
3f10c846
JR
4493 pr_err("cpl: %d efer: %016llx\n",
4494 save->cpl, save->efer);
ae8cc059
JP
4495 pr_err("%-15s %016llx %-13s %016llx\n",
4496 "cr0:", save->cr0, "cr2:", save->cr2);
4497 pr_err("%-15s %016llx %-13s %016llx\n",
4498 "cr3:", save->cr3, "cr4:", save->cr4);
4499 pr_err("%-15s %016llx %-13s %016llx\n",
4500 "dr6:", save->dr6, "dr7:", save->dr7);
4501 pr_err("%-15s %016llx %-13s %016llx\n",
4502 "rip:", save->rip, "rflags:", save->rflags);
4503 pr_err("%-15s %016llx %-13s %016llx\n",
4504 "rsp:", save->rsp, "rax:", save->rax);
4505 pr_err("%-15s %016llx %-13s %016llx\n",
4506 "star:", save->star, "lstar:", save->lstar);
4507 pr_err("%-15s %016llx %-13s %016llx\n",
4508 "cstar:", save->cstar, "sfmask:", save->sfmask);
4509 pr_err("%-15s %016llx %-13s %016llx\n",
4510 "kernel_gs_base:", save->kernel_gs_base,
4511 "sysenter_cs:", save->sysenter_cs);
4512 pr_err("%-15s %016llx %-13s %016llx\n",
4513 "sysenter_esp:", save->sysenter_esp,
4514 "sysenter_eip:", save->sysenter_eip);
4515 pr_err("%-15s %016llx %-13s %016llx\n",
4516 "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4517 pr_err("%-15s %016llx %-13s %016llx\n",
4518 "br_from:", save->br_from, "br_to:", save->br_to);
4519 pr_err("%-15s %016llx %-13s %016llx\n",
4520 "excp_from:", save->last_excp_from,
4521 "excp_to:", save->last_excp_to);
3f10c846
JR
4522}
4523
586f9607
AK
4524static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4525{
4526 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4527
4528 *info1 = control->exit_info_1;
4529 *info2 = control->exit_info_2;
4530}
4531
851ba692 4532static int handle_exit(struct kvm_vcpu *vcpu)
6aa8b732 4533{
04d2cc77 4534 struct vcpu_svm *svm = to_svm(vcpu);
851ba692 4535 struct kvm_run *kvm_run = vcpu->run;
a2fa3e9f 4536 u32 exit_code = svm->vmcb->control.exit_code;
6aa8b732 4537
8b89fe1f
PB
4538 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4539
4ee546b4 4540 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
2be4fc7a
JR
4541 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4542 if (npt_enabled)
4543 vcpu->arch.cr3 = svm->vmcb->save.cr3;
af9ca2d7 4544
cd3ff653
JR
4545 if (unlikely(svm->nested.exit_required)) {
4546 nested_svm_vmexit(svm);
4547 svm->nested.exit_required = false;
4548
4549 return 1;
4550 }
4551
2030753d 4552 if (is_guest_mode(vcpu)) {
410e4d57
JR
4553 int vmexit;
4554
d8cabddf
JR
4555 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4556 svm->vmcb->control.exit_info_1,
4557 svm->vmcb->control.exit_info_2,
4558 svm->vmcb->control.exit_int_info,
e097e5ff
SH
4559 svm->vmcb->control.exit_int_info_err,
4560 KVM_ISA_SVM);
d8cabddf 4561
410e4d57
JR
4562 vmexit = nested_svm_exit_special(svm);
4563
4564 if (vmexit == NESTED_EXIT_CONTINUE)
4565 vmexit = nested_svm_exit_handled(svm);
4566
4567 if (vmexit == NESTED_EXIT_DONE)
cf74a78b 4568 return 1;
cf74a78b
AG
4569 }
4570
a5c3832d
JR
4571 svm_complete_interrupts(svm);
4572
04d2cc77
AK
4573 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4574 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4575 kvm_run->fail_entry.hardware_entry_failure_reason
4576 = svm->vmcb->control.exit_code;
3f10c846
JR
4577 pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
4578 dump_vmcb(vcpu);
04d2cc77
AK
4579 return 0;
4580 }
4581
a2fa3e9f 4582 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
709ddebf 4583 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
55c5e464
JR
4584 exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4585 exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
6614c7d0 4586 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
6aa8b732 4587 "exit_code 0x%x\n",
b8688d51 4588 __func__, svm->vmcb->control.exit_int_info,
6aa8b732
AK
4589 exit_code);
4590
9d8f549d 4591 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
56919c5c 4592 || !svm_exit_handlers[exit_code]) {
faac2458 4593 WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
2bc19dc3
MT
4594 kvm_queue_exception(vcpu, UD_VECTOR);
4595 return 1;
6aa8b732
AK
4596 }
4597
851ba692 4598 return svm_exit_handlers[exit_code](svm);
6aa8b732
AK
4599}
4600
4601static void reload_tss(struct kvm_vcpu *vcpu)
4602{
4603 int cpu = raw_smp_processor_id();
4604
0fe1e009
TH
4605 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4606 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
6aa8b732
AK
4607 load_TR_desc();
4608}
4609
70cd94e6
BS
4610static void pre_sev_run(struct vcpu_svm *svm, int cpu)
4611{
4612 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
4613 int asid = sev_get_asid(svm->vcpu.kvm);
4614
4615 /* Assign the asid allocated with this SEV guest */
4616 svm->vmcb->control.asid = asid;
4617
4618 /*
4619 * Flush guest TLB:
4620 *
4621 * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
4622 * 2) when this VMCB was executed on a different host CPU in previous VMRUNs.
4623 */
4624 if (sd->sev_vmcbs[asid] == svm->vmcb &&
4625 svm->last_cpu == cpu)
4626 return;
4627
4628 svm->last_cpu = cpu;
4629 sd->sev_vmcbs[asid] = svm->vmcb;
4630 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
4631 mark_dirty(svm->vmcb, VMCB_ASID);
4632}
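/*
 * Illustrative sketch, not part of this file: the flush decision from
 * the comment above as a standalone predicate.  'last_vmcb' stands in
 * for sd->sev_vmcbs[asid] and 'last_cpu' for svm->last_cpu; the name
 * sev_asid_needs_flush() is made up for this sketch.
 */
static bool sev_asid_needs_flush(struct vmcb *vmcb, struct vmcb *last_vmcb,
				 int cpu, int last_cpu)
{
	/* Skip the flush only if this exact VMCB last ran on this very CPU. */
	return vmcb != last_vmcb || cpu != last_cpu;
}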
4633
e756fc62 4634static void pre_svm_run(struct vcpu_svm *svm)
6aa8b732
AK
4635{
4636 int cpu = raw_smp_processor_id();
4637
0fe1e009 4638 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
6aa8b732 4639
70cd94e6
BS
4640 if (sev_guest(svm->vcpu.kvm))
4641 return pre_sev_run(svm, cpu);
4642
4b656b12 4643 /* FIXME: handle wraparound of asid_generation */
0fe1e009
TH
4644 if (svm->asid_generation != sd->asid_generation)
4645 new_asid(svm, sd);
6aa8b732
AK
4646}
4647
95ba8273
GN
4648static void svm_inject_nmi(struct kvm_vcpu *vcpu)
4649{
4650 struct vcpu_svm *svm = to_svm(vcpu);
4651
4652 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
4653 vcpu->arch.hflags |= HF_NMI_MASK;
8a05a1b8 4654 set_intercept(svm, INTERCEPT_IRET);
95ba8273
GN
4655 ++vcpu->stat.nmi_injections;
4656}
6aa8b732 4657
85f455f7 4658static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
6aa8b732
AK
4659{
4660 struct vmcb_control_area *control;
4661
340d3bc3 4662 /* The following fields are ignored when AVIC is enabled */
e756fc62 4663 control = &svm->vmcb->control;
85f455f7 4664 control->int_vector = irq;
6aa8b732
AK
4665 control->int_ctl &= ~V_INTR_PRIO_MASK;
4666 control->int_ctl |= V_IRQ_MASK |
4667 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
decdbf6a 4668 mark_dirty(svm->vmcb, VMCB_INTR);
6aa8b732
AK
4669}
4670
66fd3f7f 4671static void svm_set_irq(struct kvm_vcpu *vcpu)
2a8067f1
ED
4672{
4673 struct vcpu_svm *svm = to_svm(vcpu);
4674
2af9194d 4675 BUG_ON(!(gif_set(svm)));
cf74a78b 4676
9fb2d2b4
GN
4677 trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
4678 ++vcpu->stat.irq_injections;
4679
219b65dc
AG
4680 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
4681 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2a8067f1
ED
4682}
4683
3bbf3565
SS
4684static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
4685{
4686 return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
4687}
4688
95ba8273 4689static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
aaacfc9a
JR
4690{
4691 struct vcpu_svm *svm = to_svm(vcpu);
aaacfc9a 4692
3bbf3565
SS
4693 if (svm_nested_virtualize_tpr(vcpu) ||
4694 kvm_vcpu_apicv_active(vcpu))
88ab24ad
JR
4695 return;
4696
596f3142
RK
4697 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
4698
95ba8273 4699 if (irr == -1)
aaacfc9a
JR
4700 return;
4701
95ba8273 4702 if (tpr >= irr)
4ee546b4 4703 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
95ba8273 4704}
aaacfc9a 4705
8d14695f
YZ
4706static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
4707{
4708 return;
4709}
4710
b2a05fef 4711static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
d62caabb 4712{
67034bb9 4713 return avic && irqchip_split(vcpu->kvm);
44a95dae
SS
4714}
4715
4716static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
4717{
d62caabb
AS
4718}
4719
67c9dddc 4720static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
44a95dae 4721{
d62caabb
AS
4722}
4723
44a95dae 4724/* Note: Currently only used by Hyper-V. */
d62caabb 4725static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
c7c9c56c 4726{
44a95dae
SS
4727 struct vcpu_svm *svm = to_svm(vcpu);
4728 struct vmcb *vmcb = svm->vmcb;
4729
67034bb9 4730 if (!kvm_vcpu_apicv_active(&svm->vcpu))
44a95dae
SS
4731 return;
4732
4733 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
4734 mark_dirty(vmcb, VMCB_INTR);
c7c9c56c
YZ
4735}
4736
6308630b 4737static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
c7c9c56c
YZ
4738{
4739 return;
4740}
4741
340d3bc3
SS
4742static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
4743{
4744 kvm_lapic_set_irr(vec, vcpu->arch.apic);
4745 smp_mb__after_atomic();
4746
4747 if (avic_vcpu_is_running(vcpu))
4748 wrmsrl(SVM_AVIC_DOORBELL,
7d669f50 4749 kvm_cpu_get_apicid(vcpu->cpu));
340d3bc3
SS
4750 else
4751 kvm_vcpu_wake_up(vcpu);
4752}
4753
411b44ba
SS
4754static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4755{
4756 unsigned long flags;
4757 struct amd_svm_iommu_ir *cur;
4758
4759 spin_lock_irqsave(&svm->ir_list_lock, flags);
4760 list_for_each_entry(cur, &svm->ir_list, node) {
4761 if (cur->data != pi->ir_data)
4762 continue;
4763 list_del(&cur->node);
4764 kfree(cur);
4765 break;
4766 }
4767 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4768}
4769
4770static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
4771{
4772 int ret = 0;
4773 unsigned long flags;
4774 struct amd_svm_iommu_ir *ir;
4775
4776 /**
4777 * In some cases, the existing irte is updated and re-set,
4778 * so we need to check here if it's already been added
4779 * to the ir_list.
4780 */
4781 if (pi->ir_data && (pi->prev_ga_tag != 0)) {
4782 struct kvm *kvm = svm->vcpu.kvm;
4783 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
4784 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
4785 struct vcpu_svm *prev_svm;
4786
4787 if (!prev_vcpu) {
4788 ret = -EINVAL;
4789 goto out;
4790 }
4791
4792 prev_svm = to_svm(prev_vcpu);
4793 svm_ir_list_del(prev_svm, pi);
4794 }
4795
4796 /**
4797 * Allocate a new amd_iommu_pi_data, which will get
4798 * added to the per-vcpu ir_list.
4799 */
4800 ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
4801 if (!ir) {
4802 ret = -ENOMEM;
4803 goto out;
4804 }
4805 ir->data = pi->ir_data;
4806
4807 spin_lock_irqsave(&svm->ir_list_lock, flags);
4808 list_add(&ir->node, &svm->ir_list);
4809 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
4810out:
4811 return ret;
4812}
4813
4814/**
4815 * Note:
4816 * The HW cannot support posting multicast/broadcast
4817 * interrupts to a vCPU. So, we still use legacy interrupt
4818 * remapping for these kinds of interrupts.
4819 *
4820 * For lowest-priority interrupts, we only support
4821 * those with a single CPU as the destination, e.g. the user
4822 * configures the interrupts via /proc/irq or uses
4823 * irqbalance to make the interrupts single-CPU.
4824 */
4825static int
4826get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
4827 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
4828{
4829 struct kvm_lapic_irq irq;
4830 struct kvm_vcpu *vcpu = NULL;
4831
4832 kvm_set_msi_irq(kvm, e, &irq);
4833
4834 if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
4835 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
4836 __func__, irq.vector);
4837 return -1;
4838 }
4839
4840 pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
4841 irq.vector);
4842 *svm = to_svm(vcpu);
d0ec49d4 4843 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
411b44ba
SS
4844 vcpu_info->vector = irq.vector;
4845
4846 return 0;
4847}
4848
4849/*
4850 * svm_update_pi_irte - set IRTE for Posted-Interrupts
4851 *
4852 * @kvm: kvm
4853 * @host_irq: host irq of the interrupt
4854 * @guest_irq: gsi of the interrupt
4855 * @set: set or unset PI
4856 * returns 0 on success, < 0 on failure
4857 */
4858static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
4859 uint32_t guest_irq, bool set)
4860{
4861 struct kvm_kernel_irq_routing_entry *e;
4862 struct kvm_irq_routing_table *irq_rt;
4863 int idx, ret = -EINVAL;
4864
4865 if (!kvm_arch_has_assigned_device(kvm) ||
4866 !irq_remapping_cap(IRQ_POSTING_CAP))
4867 return 0;
4868
4869 pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
4870 __func__, host_irq, guest_irq, set);
4871
4872 idx = srcu_read_lock(&kvm->irq_srcu);
4873 irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
4874 WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
4875
4876 hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
4877 struct vcpu_data vcpu_info;
4878 struct vcpu_svm *svm = NULL;
4879
4880 if (e->type != KVM_IRQ_ROUTING_MSI)
4881 continue;
4882
4883 /**
4884 * Here, we set up the IRTE with legacy mode in the following cases:
4885 * 1. When the interrupt cannot be targeted to a specific vcpu.
4886 * 2. When unsetting the posted interrupt.
4887 * 3. When APIC virtualization is disabled for the vcpu.
4888 */
4889 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
4890 kvm_vcpu_apicv_active(&svm->vcpu)) {
4891 struct amd_iommu_pi_data pi;
4892
4893 /* Try to enable guest_mode in IRTE */
d0ec49d4
TL
4894 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
4895 AVIC_HPA_MASK);
411b44ba
SS
4896 pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
4897 svm->vcpu.vcpu_id);
4898 pi.is_guest_mode = true;
4899 pi.vcpu_data = &vcpu_info;
4900 ret = irq_set_vcpu_affinity(host_irq, &pi);
4901
4902 /**
4903 * Here, we have successfully set up vcpu affinity in
4904 * IOMMU guest mode. Now, we need to store the posted
4905 * interrupt information in a per-vcpu ir_list so that
4906 * we can reference it directly when we update vcpu
4907 * scheduling information in the IOMMU irte.
4908 */
4909 if (!ret && pi.is_guest_mode)
4910 svm_ir_list_add(svm, &pi);
4911 } else {
4912 /* Use legacy mode in IRTE */
4913 struct amd_iommu_pi_data pi;
4914
4915 /**
4916 * Here, pi is used to:
4917 * - Tell IOMMU to use legacy mode for this interrupt.
4918 * - Retrieve ga_tag of prior interrupt remapping data.
4919 */
4920 pi.is_guest_mode = false;
4921 ret = irq_set_vcpu_affinity(host_irq, &pi);
4922
4923 /**
4924 * Check if the posted interrupt was previously
4925 * set up with guest_mode by checking if the ga_tag
4926 * was cached. If so, we need to clean up the per-vcpu
4927 * ir_list.
4928 */
4929 if (!ret && pi.prev_ga_tag) {
4930 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
4931 struct kvm_vcpu *vcpu;
4932
4933 vcpu = kvm_get_vcpu_by_id(kvm, id);
4934 if (vcpu)
4935 svm_ir_list_del(to_svm(vcpu), &pi);
4936 }
4937 }
4938
4939 if (!ret && svm) {
4940 trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
4941 host_irq, e->gsi,
4942 vcpu_info.vector,
4943 vcpu_info.pi_desc_addr, set);
4944 }
4945
4946 if (ret < 0) {
4947 pr_err("%s: failed to update PI IRTE\n", __func__);
4948 goto out;
4949 }
4950 }
4951
4952 ret = 0;
4953out:
4954 srcu_read_unlock(&kvm->irq_srcu, idx);
4955 return ret;
4956}
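/*
 * Illustrative usage sketch, not part of this file: following the
 * parameter documentation above, posting is enabled when a device MSI
 * is routed into the guest and torn down again afterwards.  The caller
 * below is hypothetical; in practice this function is reached through
 * KVM's irq-bypass machinery rather than invoked directly.
 */
static void example_pi_toggle(struct kvm *kvm, unsigned int host_irq,
			      u32 guest_irq)
{
	if (svm_update_pi_irte(kvm, host_irq, guest_irq, true) < 0)
		pr_debug("SVM: falling back to remapped delivery\n");

	/* ... later, when the route goes away ... */
	svm_update_pi_irte(kvm, host_irq, guest_irq, false);
}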
4957
95ba8273
GN
4958static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
4959{
4960 struct vcpu_svm *svm = to_svm(vcpu);
4961 struct vmcb *vmcb = svm->vmcb;
924584cc
JR
4962 int ret;
4963 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
4964 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
4965 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
4966
4967 return ret;
aaacfc9a
JR
4968}
4969
3cfc3092
JK
4970static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
4971{
4972 struct vcpu_svm *svm = to_svm(vcpu);
4973
4974 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
4975}
4976
4977static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4978{
4979 struct vcpu_svm *svm = to_svm(vcpu);
4980
4981 if (masked) {
4982 svm->vcpu.arch.hflags |= HF_NMI_MASK;
8a05a1b8 4983 set_intercept(svm, INTERCEPT_IRET);
3cfc3092
JK
4984 } else {
4985 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
8a05a1b8 4986 clr_intercept(svm, INTERCEPT_IRET);
3cfc3092
JK
4987 }
4988}
4989
78646121
GN
4990static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
4991{
4992 struct vcpu_svm *svm = to_svm(vcpu);
4993 struct vmcb *vmcb = svm->vmcb;
7fcdb510
JR
4994 int ret;
4995
4996 if (!gif_set(svm) ||
4997 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
4998 return 0;
4999
f6e78475 5000 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
7fcdb510 5001
2030753d 5002 if (is_guest_mode(vcpu))
7fcdb510
JR
5003 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5004
5005 return ret;
78646121
GN
5006}
5007
c9a7953f 5008static void enable_irq_window(struct kvm_vcpu *vcpu)
6aa8b732 5009{
219b65dc 5010 struct vcpu_svm *svm = to_svm(vcpu);
219b65dc 5011
340d3bc3
SS
5012 if (kvm_vcpu_apicv_active(vcpu))
5013 return;
5014
e0231715
JR
5015 /*
5016 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5017 * 1, because that's a separate STGI/VMRUN intercept. The next time we
5018 * get that intercept, this function will be called again though and
640bd6e5
JN
5019 * we'll get the vintr intercept. However, if the vGIF feature is
5020 * enabled, the STGI interception will not occur. Enable the irq
5021 * window under the assumption that the hardware will set the GIF.
e0231715 5022 */
640bd6e5 5023 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
219b65dc
AG
5024 svm_set_vintr(svm);
5025 svm_inject_irq(svm, 0x0);
5026 }
85f455f7
ED
5027}
5028
c9a7953f 5029static void enable_nmi_window(struct kvm_vcpu *vcpu)
c1150d8c 5030{
04d2cc77 5031 struct vcpu_svm *svm = to_svm(vcpu);
c1150d8c 5032
44c11430
GN
5033 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5034 == HF_NMI_MASK)
c9a7953f 5035 return; /* IRET will cause a vm exit */
44c11430 5036
640bd6e5
JN
5037 if (!gif_set(svm)) {
5038 if (vgif_enabled(svm))
5039 set_intercept(svm, INTERCEPT_STGI);
1a5e1852 5040 return; /* STGI will cause a vm exit */
640bd6e5 5041 }
1a5e1852
LP
5042
5043 if (svm->nested.exit_required)
5044 return; /* we're not going to run the guest yet */
5045
e0231715
JR
5046 /*
5047 * Something prevents the NMI from being injected. Single step over the possible
5048 * problem (IRET or exception injection or interrupt shadow).
5049 */
ab2f4d73 5050 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
6be7d306 5051 svm->nmi_singlestep = true;
44c11430 5052 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
c1150d8c
DL
5053}
5054
cbc94022
IE
5055static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5056{
5057 return 0;
5058}
5059
d9e368d6
AK
5060static void svm_flush_tlb(struct kvm_vcpu *vcpu)
5061{
38e5e92f
JR
5062 struct vcpu_svm *svm = to_svm(vcpu);
5063
5064 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5065 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5066 else
5067 svm->asid_generation--;
d9e368d6
AK
5068}
5069
04d2cc77
AK
5070static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5071{
5072}
5073
d7bf8221
JR
5074static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5075{
5076 struct vcpu_svm *svm = to_svm(vcpu);
5077
3bbf3565 5078 if (svm_nested_virtualize_tpr(vcpu))
88ab24ad
JR
5079 return;
5080
4ee546b4 5081 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
d7bf8221 5082 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
615d5193 5083 kvm_set_cr8(vcpu, cr8);
d7bf8221
JR
5084 }
5085}
5086
649d6864
JR
5087static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5088{
5089 struct vcpu_svm *svm = to_svm(vcpu);
5090 u64 cr8;
5091
3bbf3565
SS
5092 if (svm_nested_virtualize_tpr(vcpu) ||
5093 kvm_vcpu_apicv_active(vcpu))
88ab24ad
JR
5094 return;
5095
649d6864
JR
5096 cr8 = kvm_get_cr8(vcpu);
5097 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5098 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5099}
5100
9222be18
GN
5101static void svm_complete_interrupts(struct vcpu_svm *svm)
5102{
5103 u8 vector;
5104 int type;
5105 u32 exitintinfo = svm->vmcb->control.exit_int_info;
66b7138f
JK
5106 unsigned int3_injected = svm->int3_injected;
5107
5108 svm->int3_injected = 0;
9222be18 5109
bd3d1ec3
AK
5110 /*
5111 * If we've made progress since setting HF_IRET_MASK, we've
5112 * executed an IRET and can allow NMI injection.
5113 */
5114 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5115 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
44c11430 5116 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
3842d135
AK
5117 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5118 }
44c11430 5119
9222be18
GN
5120 svm->vcpu.arch.nmi_injected = false;
5121 kvm_clear_exception_queue(&svm->vcpu);
5122 kvm_clear_interrupt_queue(&svm->vcpu);
5123
5124 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5125 return;
5126
3842d135
AK
5127 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5128
9222be18
GN
5129 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5130 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5131
5132 switch (type) {
5133 case SVM_EXITINTINFO_TYPE_NMI:
5134 svm->vcpu.arch.nmi_injected = true;
5135 break;
5136 case SVM_EXITINTINFO_TYPE_EXEPT:
66b7138f
JK
5137 /*
5138 * In case of software exceptions, do not reinject the vector,
5139 * but re-execute the instruction instead. Rewind RIP first
5140 * if we emulated INT3 before.
5141 */
5142 if (kvm_exception_is_soft(vector)) {
5143 if (vector == BP_VECTOR && int3_injected &&
5144 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5145 kvm_rip_write(&svm->vcpu,
5146 kvm_rip_read(&svm->vcpu) -
5147 int3_injected);
9222be18 5148 break;
66b7138f 5149 }
9222be18
GN
5150 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5151 u32 err = svm->vmcb->control.exit_int_info_err;
ce7ddec4 5152 kvm_requeue_exception_e(&svm->vcpu, vector, err);
9222be18
GN
5153
5154 } else
ce7ddec4 5155 kvm_requeue_exception(&svm->vcpu, vector);
9222be18
GN
5156 break;
5157 case SVM_EXITINTINFO_TYPE_INTR:
66fd3f7f 5158 kvm_queue_interrupt(&svm->vcpu, vector, false);
9222be18
GN
5159 break;
5160 default:
5161 break;
5162 }
5163}
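/*
 * Illustrative sketch, not part of this file: the RIP rewind performed
 * above when a soft exception (an emulated INT3) was injected but the
 * vmexit happened before it was delivered.  int3_injected holds the
 * number of bytes RIP was advanced by when the INT3 was emulated, so
 * subtracting it re-executes the breakpoint; int3_rewind_rip() is a
 * name made up for this sketch.
 */
static unsigned long int3_rewind_rip(unsigned long rip, unsigned long int3_rip,
				     unsigned int int3_injected)
{
	/* Only rewind if we are still sitting right after the emulated INT3. */
	if (int3_injected && rip == int3_rip)
		return rip - int3_injected;

	return rip;
}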
5164
b463a6f7
AK
5165static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5166{
5167 struct vcpu_svm *svm = to_svm(vcpu);
5168 struct vmcb_control_area *control = &svm->vmcb->control;
5169
5170 control->exit_int_info = control->event_inj;
5171 control->exit_int_info_err = control->event_inj_err;
5172 control->event_inj = 0;
5173 svm_complete_interrupts(svm);
5174}
5175
851ba692 5176static void svm_vcpu_run(struct kvm_vcpu *vcpu)
6aa8b732 5177{
a2fa3e9f 5178 struct vcpu_svm *svm = to_svm(vcpu);
d9e368d6 5179
2041a06a
JR
5180 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5181 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5182 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5183
cd3ff653
JR
5184 /*
5185 * A vmexit emulation is required before the vcpu can be executed
5186 * again.
5187 */
5188 if (unlikely(svm->nested.exit_required))
5189 return;
5190
a12713c2
LP
5191 /*
5192 * Disable singlestep if we're injecting an interrupt/exception.
5193 * We don't want our modified rflags to be pushed on the stack where
5194 * we might not be able to easily reset them if we disabled NMI
5195 * singlestep later.
5196 */
5197 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5198 /*
5199 * Event injection happens before external interrupts cause a
5200 * vmexit and interrupts are disabled here, so smp_send_reschedule
5201 * is enough to force an immediate vmexit.
5202 */
5203 disable_nmi_singlestep(svm);
5204 smp_send_reschedule(vcpu->cpu);
5205 }
5206
e756fc62 5207 pre_svm_run(svm);
6aa8b732 5208
649d6864
JR
5209 sync_lapic_to_cr8(vcpu);
5210
cda0ffdd 5211 svm->vmcb->save.cr2 = vcpu->arch.cr2;
6aa8b732 5212
04d2cc77
AK
5213 clgi();
5214
5215 local_irq_enable();
36241b8c 5216
6aa8b732 5217 asm volatile (
7454766f
AK
5218 "push %%" _ASM_BP "; \n\t"
5219 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5220 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5221 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5222 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5223 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5224 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
05b3e0c2 5225#ifdef CONFIG_X86_64
fb3f0f51
RR
5226 "mov %c[r8](%[svm]), %%r8 \n\t"
5227 "mov %c[r9](%[svm]), %%r9 \n\t"
5228 "mov %c[r10](%[svm]), %%r10 \n\t"
5229 "mov %c[r11](%[svm]), %%r11 \n\t"
5230 "mov %c[r12](%[svm]), %%r12 \n\t"
5231 "mov %c[r13](%[svm]), %%r13 \n\t"
5232 "mov %c[r14](%[svm]), %%r14 \n\t"
5233 "mov %c[r15](%[svm]), %%r15 \n\t"
6aa8b732
AK
5234#endif
5235
6aa8b732 5236 /* Enter guest mode */
7454766f
AK
5237 "push %%" _ASM_AX " \n\t"
5238 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
4ecac3fd
AK
5239 __ex(SVM_VMLOAD) "\n\t"
5240 __ex(SVM_VMRUN) "\n\t"
5241 __ex(SVM_VMSAVE) "\n\t"
7454766f 5242 "pop %%" _ASM_AX " \n\t"
6aa8b732
AK
5243
5244 /* Save guest registers, load host registers */
7454766f
AK
5245 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5246 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5247 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5248 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5249 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5250 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
05b3e0c2 5251#ifdef CONFIG_X86_64
fb3f0f51
RR
5252 "mov %%r8, %c[r8](%[svm]) \n\t"
5253 "mov %%r9, %c[r9](%[svm]) \n\t"
5254 "mov %%r10, %c[r10](%[svm]) \n\t"
5255 "mov %%r11, %c[r11](%[svm]) \n\t"
5256 "mov %%r12, %c[r12](%[svm]) \n\t"
5257 "mov %%r13, %c[r13](%[svm]) \n\t"
5258 "mov %%r14, %c[r14](%[svm]) \n\t"
5259 "mov %%r15, %c[r15](%[svm]) \n\t"
6aa8b732 5260#endif
7454766f 5261 "pop %%" _ASM_BP
6aa8b732 5262 :
fb3f0f51 5263 : [svm]"a"(svm),
6aa8b732 5264 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
ad312c7c
ZX
5265 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5266 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5267 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5268 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5269 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5270 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
05b3e0c2 5271#ifdef CONFIG_X86_64
ad312c7c
ZX
5272 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5273 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5274 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5275 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5276 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5277 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5278 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5279 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
6aa8b732 5280#endif
54a08c04
LV
5281 : "cc", "memory"
5282#ifdef CONFIG_X86_64
7454766f 5283 , "rbx", "rcx", "rdx", "rsi", "rdi"
54a08c04 5284 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
7454766f
AK
5285#else
5286 , "ebx", "ecx", "edx", "esi", "edi"
54a08c04
LV
5287#endif
5288 );
6aa8b732 5289
82ca2d10
AK
5290#ifdef CONFIG_X86_64
5291 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5292#else
dacccfdd 5293 loadsegment(fs, svm->host.fs);
831ca609
AK
5294#ifndef CONFIG_X86_32_LAZY_GS
5295 loadsegment(gs, svm->host.gs);
5296#endif
9581d442 5297#endif
6aa8b732
AK
5298
5299 reload_tss(vcpu);
5300
56ba47dd
AK
5301 local_irq_disable();
5302
13c34e07
AK
5303 vcpu->arch.cr2 = svm->vmcb->save.cr2;
5304 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5305 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5306 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5307
3781c01c
JR
5308 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5309 kvm_before_handle_nmi(&svm->vcpu);
5310
5311 stgi();
5312
5313 /* Any pending NMI will happen here */
5314
5315 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5316 kvm_after_handle_nmi(&svm->vcpu);
5317
d7bf8221
JR
5318 sync_cr8_to_lapic(vcpu);
5319
a2fa3e9f 5320 svm->next_rip = 0;
9222be18 5321
38e5e92f
JR
5322 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5323
631bc487
GN
5324	/* If the exit was due to a #PF, check for an async page fault reason. */
5325 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
1261bfa3 5326 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
631bc487 5327
6de4f3ad
AK
5328 if (npt_enabled) {
5329 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5330 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5331 }
fe5913e4
JR
5332
5333 /*
5334 * We need to handle MC intercepts here before the vcpu has a chance to
5335 * change the physical cpu
5336 */
5337 if (unlikely(svm->vmcb->control.exit_code ==
5338 SVM_EXIT_EXCP_BASE + MC_VECTOR))
5339 svm_handle_mce(svm);
8d28fec4
RJ
5340
5341 mark_all_clean(svm->vmcb);
6aa8b732 5342}
c207aee4 5343STACK_FRAME_NON_STANDARD(svm_vcpu_run);
6aa8b732 5344
6aa8b732
AK
5345static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5346{
a2fa3e9f
GH
5347 struct vcpu_svm *svm = to_svm(vcpu);
5348
d0ec49d4 5349 svm->vmcb->save.cr3 = __sme_set(root);
dcca1a65 5350 mark_dirty(svm->vmcb, VMCB_CR);
f40f6a45 5351 svm_flush_tlb(vcpu);
6aa8b732
AK
5352}
5353
1c97f0a0
JR
5354static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5355{
5356 struct vcpu_svm *svm = to_svm(vcpu);
5357
d0ec49d4 5358 svm->vmcb->control.nested_cr3 = __sme_set(root);
b2747166 5359 mark_dirty(svm->vmcb, VMCB_NPT);
1c97f0a0
JR
5360
5361 /* Also sync guest cr3 here in case we live migrate */
9f8fe504 5362 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
dcca1a65 5363 mark_dirty(svm->vmcb, VMCB_CR);
1c97f0a0 5364
f40f6a45 5365 svm_flush_tlb(vcpu);
1c97f0a0
JR
5366}
5367
6aa8b732
AK
5368static int is_disabled(void)
5369{
6031a61c
JR
5370 u64 vm_cr;
5371
5372 rdmsrl(MSR_VM_CR, vm_cr);
5373 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5374 return 1;
5375
6aa8b732
AK
5376 return 0;
5377}
5378
102d8325
IM
5379static void
5380svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5381{
5382 /*
5383 * Patch in the VMMCALL instruction:
5384 */
5385 hypercall[0] = 0x0f;
5386 hypercall[1] = 0x01;
5387 hypercall[2] = 0xd9;
102d8325
IM
5388}
5389
002c7f7c
YS
5390static void svm_check_processor_compat(void *rtn)
5391{
5392 *(int *)rtn = 0;
5393}
5394
774ead3a
AK
5395static bool svm_cpu_has_accelerated_tpr(void)
5396{
5397 return false;
5398}
5399
6d396b55
PB
5400static bool svm_has_high_real_mode_segbase(void)
5401{
5402 return true;
5403}
5404
fc07e76a
PB
5405static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5406{
5407 return 0;
5408}
5409
0e851880
SY
5410static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5411{
6092d3d3
JR
5412 struct vcpu_svm *svm = to_svm(vcpu);
5413
5414 /* Update nrips enabled cache */
d6321d49 5415 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
46781eae
SS
5416
5417 if (!kvm_vcpu_apicv_active(vcpu))
5418 return;
5419
1b4d56b8 5420 guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
0e851880
SY
5421}
5422
d4330ef2
JR
5423static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5424{
c2c63a49 5425 switch (func) {
46781eae
SS
5426 case 0x1:
5427 if (avic)
5428 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5429 break;
4c62a2dc
JR
5430 case 0x80000001:
5431 if (nested)
5432 entry->ecx |= (1 << 2); /* Set SVM bit */
5433 break;
c2c63a49
JR
5434 case 0x8000000A:
5435 entry->eax = 1; /* SVM revision 1 */
5436		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5437				   ASID emulation to nested SVM */
5438 entry->ecx = 0; /* Reserved */
7a190667
JR
5439		entry->edx = 0; /* By default do not support any
5440 additional features */
5441
5442 /* Support next_rip if host supports it */
2a6b20b8 5443 if (boot_cpu_has(X86_FEATURE_NRIPS))
7a190667 5444 entry->edx |= SVM_FEATURE_NRIP;
c2c63a49 5445
3d4aeaad
JR
5446 /* Support NPT for the guest if enabled */
5447 if (npt_enabled)
5448 entry->edx |= SVM_FEATURE_NPT;
5449
c2c63a49 5450 break;
8765d753
BS
5451 case 0x8000001F:
5452 /* Support memory encryption cpuid if host supports it */
5453 if (boot_cpu_has(X86_FEATURE_SEV))
5454 cpuid(0x8000001f, &entry->eax, &entry->ebx,
5455 &entry->ecx, &entry->edx);
5456
c2c63a49 5457 }
d4330ef2
JR
5458}
5459
17cc3935 5460static int svm_get_lpage_level(void)
344f414f 5461{
17cc3935 5462 return PT_PDPE_LEVEL;
344f414f
JR
5463}
5464
4e47c7a6
SY
5465static bool svm_rdtscp_supported(void)
5466{
46896c73 5467 return boot_cpu_has(X86_FEATURE_RDTSCP);
4e47c7a6
SY
5468}
5469
ad756a16
MJ
5470static bool svm_invpcid_supported(void)
5471{
5472 return false;
5473}
5474
93c4adc7
PB
5475static bool svm_mpx_supported(void)
5476{
5477 return false;
5478}
5479
55412b2e
WL
5480static bool svm_xsaves_supported(void)
5481{
5482 return false;
5483}
5484
f5f48ee1
SY
5485static bool svm_has_wbinvd_exit(void)
5486{
5487 return true;
5488}
5489
8061252e 5490#define PRE_EX(exit) { .exit_code = (exit), \
40e19b51 5491 .stage = X86_ICPT_PRE_EXCEPT, }
cfec82cb 5492#define POST_EX(exit) { .exit_code = (exit), \
40e19b51 5493 .stage = X86_ICPT_POST_EXCEPT, }
d7eb8203 5494#define POST_MEM(exit) { .exit_code = (exit), \
40e19b51 5495 .stage = X86_ICPT_POST_MEMACCESS, }
cfec82cb 5496
09941fbb 5497static const struct __x86_intercept {
cfec82cb
JR
5498 u32 exit_code;
5499 enum x86_intercept_stage stage;
cfec82cb
JR
5500} x86_intercept_map[] = {
5501 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5502 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5503 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5504 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5505 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
3b88e41a
JR
5506 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5507 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
dee6bb70
JR
5508 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5509 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5510 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5511 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5512 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5513 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5514 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5515 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
01de8b09
JR
5516 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5517 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5518 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5519 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5520 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5521 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5522 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5523 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
d7eb8203
JR
5524 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5525 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5526 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
8061252e
JR
5527 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5528 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5529 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5530 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5531 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5532 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5533 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5534 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5535 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
bf608f88
JR
5536 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5537 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5538 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5539 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5540 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5541 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5542 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
f6511935
JR
5543 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5544 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5545 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5546 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
cfec82cb
JR
5547};
5548
8061252e 5549#undef PRE_EX
cfec82cb 5550#undef POST_EX
d7eb8203 5551#undef POST_MEM
cfec82cb 5552
8a76d7f2
JR
5553static int svm_check_intercept(struct kvm_vcpu *vcpu,
5554 struct x86_instruction_info *info,
5555 enum x86_intercept_stage stage)
5556{
cfec82cb
JR
5557 struct vcpu_svm *svm = to_svm(vcpu);
5558 int vmexit, ret = X86EMUL_CONTINUE;
5559 struct __x86_intercept icpt_info;
5560 struct vmcb *vmcb = svm->vmcb;
5561
5562 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5563 goto out;
5564
5565 icpt_info = x86_intercept_map[info->intercept];
5566
40e19b51 5567 if (stage != icpt_info.stage)
cfec82cb
JR
5568 goto out;
5569
5570 switch (icpt_info.exit_code) {
5571 case SVM_EXIT_READ_CR0:
5572 if (info->intercept == x86_intercept_cr_read)
5573 icpt_info.exit_code += info->modrm_reg;
5574 break;
5575 case SVM_EXIT_WRITE_CR0: {
5576 unsigned long cr0, val;
5577 u64 intercept;
5578
5579 if (info->intercept == x86_intercept_cr_write)
5580 icpt_info.exit_code += info->modrm_reg;
5581
62baf44c
JK
5582 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5583 info->intercept == x86_intercept_clts)
cfec82cb
JR
5584 break;
5585
5586 intercept = svm->nested.intercept;
5587
5588 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5589 break;
5590
5591 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5592 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5593
5594 if (info->intercept == x86_intercept_lmsw) {
5595 cr0 &= 0xfUL;
5596 val &= 0xfUL;
5597 /* lmsw can't clear PE - catch this here */
5598 if (cr0 & X86_CR0_PE)
5599 val |= X86_CR0_PE;
5600 }
5601
5602 if (cr0 ^ val)
5603 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5604
5605 break;
5606 }
3b88e41a
JR
5607 case SVM_EXIT_READ_DR0:
5608 case SVM_EXIT_WRITE_DR0:
5609 icpt_info.exit_code += info->modrm_reg;
5610 break;
8061252e
JR
5611 case SVM_EXIT_MSR:
5612 if (info->intercept == x86_intercept_wrmsr)
5613 vmcb->control.exit_info_1 = 1;
5614 else
5615 vmcb->control.exit_info_1 = 0;
5616 break;
bf608f88
JR
5617 case SVM_EXIT_PAUSE:
5618 /*
5619		 * We get this intercept for NOP only, but PAUSE is
5620		 * encoded as REP NOP, so check for the REP prefix here.
5621 */
5622 if (info->rep_prefix != REPE_PREFIX)
5623 goto out;
49a8afca 5624 break;
f6511935
JR
5625 case SVM_EXIT_IOIO: {
5626 u64 exit_info;
5627 u32 bytes;
5628
f6511935
JR
5629 if (info->intercept == x86_intercept_in ||
5630 info->intercept == x86_intercept_ins) {
6cbc5f5a
JK
5631 exit_info = ((info->src_val & 0xffff) << 16) |
5632 SVM_IOIO_TYPE_MASK;
f6511935 5633 bytes = info->dst_bytes;
6493f157 5634 } else {
6cbc5f5a 5635 exit_info = (info->dst_val & 0xffff) << 16;
6493f157 5636 bytes = info->src_bytes;
f6511935
JR
5637 }
5638
5639 if (info->intercept == x86_intercept_outs ||
5640 info->intercept == x86_intercept_ins)
5641 exit_info |= SVM_IOIO_STR_MASK;
5642
5643 if (info->rep_prefix)
5644 exit_info |= SVM_IOIO_REP_MASK;
5645
5646 bytes = min(bytes, 4u);
5647
5648 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5649
5650 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5651
5652 vmcb->control.exit_info_1 = exit_info;
5653 vmcb->control.exit_info_2 = info->next_rip;
5654
5655 break;
5656 }
cfec82cb
JR
5657 default:
5658 break;
5659 }
5660
f104765b
BD
5661 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5662 if (static_cpu_has(X86_FEATURE_NRIPS))
5663 vmcb->control.next_rip = info->next_rip;
cfec82cb
JR
5664 vmcb->control.exit_code = icpt_info.exit_code;
5665 vmexit = nested_svm_exit_handled(svm);
5666
5667 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5668 : X86EMUL_CONTINUE;
5669
5670out:
5671 return ret;
8a76d7f2
JR
5672}
5673
a547c6db
YZ
5674static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5675{
5676 local_irq_enable();
f2485b3e
PB
5677 /*
5678	 * Execute at least one instruction with interrupts enabled so that a
5679	 * pending timer interrupt is not delayed by the interrupt shadow.
5680 */
5681 asm("nop");
5682 local_irq_disable();
a547c6db
YZ
5683}
5684
ae97a3b8
RK
5685static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5686{
5687}
5688
be8ca170
SS
5689static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5690{
5691 if (avic_handle_apic_id_update(vcpu) != 0)
5692 return;
5693 if (avic_handle_dfr_update(vcpu) != 0)
5694 return;
5695 avic_handle_ldr_update(vcpu);
5696}
5697
74f16909
BP
5698static void svm_setup_mce(struct kvm_vcpu *vcpu)
5699{
5700 /* [63:9] are reserved. */
5701 vcpu->arch.mcg_cap &= 0x1ff;
5702}
5703
72d7b374
LP
5704static int svm_smi_allowed(struct kvm_vcpu *vcpu)
5705{
05cade71
LP
5706 struct vcpu_svm *svm = to_svm(vcpu);
5707
5708 /* Per APM Vol.2 15.22.2 "Response to SMI" */
5709 if (!gif_set(svm))
5710 return 0;
5711
5712 if (is_guest_mode(&svm->vcpu) &&
5713 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5714 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
5715 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5716 svm->nested.exit_required = true;
5717 return 0;
5718 }
5719
72d7b374
LP
5720 return 1;
5721}
5722
0234bf88
LP
5723static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
5724{
05cade71
LP
5725 struct vcpu_svm *svm = to_svm(vcpu);
5726 int ret;
5727
5728 if (is_guest_mode(vcpu)) {
5729 /* FED8h - SVM Guest */
5730 put_smstate(u64, smstate, 0x7ed8, 1);
5731 /* FEE0h - SVM Guest VMCB Physical Address */
5732 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5733
5734 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5735 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5736 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5737
5738 ret = nested_svm_vmexit(svm);
5739 if (ret)
5740 return ret;
5741 }
0234bf88
LP
5742 return 0;
5743}
5744
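/*
 * On RSM, undo what svm_pre_enter_smm() saved to SMRAM: if the saved state
 * indicates the vcpu was in guest mode, map the nested VMCB again and
 * re-enter the nested guest.
 */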
5745static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
5746{
05cade71
LP
5747 struct vcpu_svm *svm = to_svm(vcpu);
5748 struct vmcb *nested_vmcb;
5749 struct page *page;
5750 struct {
5751 u64 guest;
5752 u64 vmcb;
5753 } svm_state_save;
5754 int ret;
5755
5756 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
5757 sizeof(svm_state_save));
5758 if (ret)
5759 return ret;
5760
5761 if (svm_state_save.guest) {
5762 vcpu->arch.hflags &= ~HF_SMM_MASK;
5763 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
5764 if (nested_vmcb)
5765 enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
5766 else
5767 ret = 1;
5768 vcpu->arch.hflags |= HF_SMM_MASK;
5769 }
5770 return ret;
0234bf88
LP
5771}
5772
cc3d967f
LP
5773static int enable_smi_window(struct kvm_vcpu *vcpu)
5774{
5775 struct vcpu_svm *svm = to_svm(vcpu);
5776
5777 if (!gif_set(svm)) {
5778 if (vgif_enabled(svm))
5779 set_intercept(svm, INTERCEPT_STGI);
5780 /* STGI will cause a vm exit */
5781 return 1;
5782 }
5783 return 0;
5784}
5785
1654efcb
BS
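/*
 * Allocate a fresh SEV ASID. Returns a 1-based ASID on success (bit
 * positions in sev_asid_bitmap are 0-based), or -EBUSY when the range
 * reserved for SEV guests is exhausted.
 */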
5786static int sev_asid_new(void)
5787{
5788 int pos;
5789
5790 /*
5791	 * SEV-enabled guests must use ASIDs in the range min_sev_asid to max_sev_asid.
5792 */
5793 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
5794 if (pos >= max_sev_asid)
5795 return -EBUSY;
5796
5797 set_bit(pos, sev_asid_bitmap);
5798 return pos + 1;
5799}
5800
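/*
 * KVM_SEV_INIT: allocate an ASID for this VM and initialize the SEV
 * platform. On success the VM is marked SEV-active; on failure the ASID
 * is returned to the bitmap.
 */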
5801static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
5802{
5803 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5804 int asid, ret;
5805
5806 ret = -EBUSY;
5807 asid = sev_asid_new();
5808 if (asid < 0)
5809 return ret;
5810
5811 ret = sev_platform_init(&argp->error);
5812 if (ret)
5813 goto e_free;
5814
5815 sev->active = true;
5816 sev->asid = asid;
5817
5818 return 0;
5819
5820e_free:
5821 __sev_asid_free(asid);
5822 return ret;
5823}
5824
59414c98
BS
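/*
 * Bind the VM's ASID to its SEV firmware handle. Caches are flushed on all
 * CPUs and a DF_FLUSH is issued first, then the ACTIVATE command associates
 * the ASID with the handle.
 */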
5825static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
5826{
5827 struct sev_data_activate *data;
5828 int asid = sev_get_asid(kvm);
5829 int ret;
5830
5831 wbinvd_on_all_cpus();
5832
5833 ret = sev_guest_df_flush(error);
5834 if (ret)
5835 return ret;
5836
5837 data = kzalloc(sizeof(*data), GFP_KERNEL);
5838 if (!data)
5839 return -ENOMEM;
5840
5841 /* activate ASID on the given handle */
5842 data->handle = handle;
5843 data->asid = asid;
5844 ret = sev_guest_activate(data, error);
5845 kfree(data);
5846
5847 return ret;
5848}
5849
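/*
 * Issue a SEV firmware command through the SEV device file descriptor that
 * userspace supplied, so the PSP driver can verify the descriptor before
 * running the command on userspace's behalf.
 */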
89c50580 5850static int __sev_issue_cmd(int fd, int id, void *data, int *error)
59414c98
BS
5851{
5852 struct fd f;
5853 int ret;
5854
5855 f = fdget(fd);
5856 if (!f.file)
5857 return -EBADF;
5858
5859 ret = sev_issue_cmd_external_user(f.file, id, data, error);
5860
5861 fdput(f);
5862 return ret;
5863}
5864
89c50580
BS
5865static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
5866{
5867 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5868
5869 return __sev_issue_cmd(sev->fd, id, data, error);
5870}
5871
59414c98
BS
5872static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
5873{
5874 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5875 struct sev_data_launch_start *start;
5876 struct kvm_sev_launch_start params;
5877 void *dh_blob, *session_blob;
5878 int *error = &argp->error;
5879 int ret;
5880
5881 if (!sev_guest(kvm))
5882 return -ENOTTY;
5883
5884 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5885 return -EFAULT;
5886
5887 start = kzalloc(sizeof(*start), GFP_KERNEL);
5888 if (!start)
5889 return -ENOMEM;
5890
5891 dh_blob = NULL;
5892 if (params.dh_uaddr) {
5893 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
5894 if (IS_ERR(dh_blob)) {
5895 ret = PTR_ERR(dh_blob);
5896 goto e_free;
5897 }
5898
5899 start->dh_cert_address = __sme_set(__pa(dh_blob));
5900 start->dh_cert_len = params.dh_len;
5901 }
5902
5903 session_blob = NULL;
5904 if (params.session_uaddr) {
5905 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
5906 if (IS_ERR(session_blob)) {
5907 ret = PTR_ERR(session_blob);
5908 goto e_free_dh;
5909 }
5910
5911 start->session_address = __sme_set(__pa(session_blob));
5912 start->session_len = params.session_len;
5913 }
5914
5915 start->handle = params.handle;
5916 start->policy = params.policy;
5917
5918 /* create memory encryption context */
89c50580 5919 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
59414c98
BS
5920 if (ret)
5921 goto e_free_session;
5922
5923 /* Bind ASID to this guest */
5924 ret = sev_bind_asid(kvm, start->handle, error);
5925 if (ret)
5926 goto e_free_session;
5927
5928 /* return handle to userspace */
5929 params.handle = start->handle;
5930 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
5931 sev_unbind_asid(kvm, start->handle);
5932 ret = -EFAULT;
5933 goto e_free_session;
5934 }
5935
5936 sev->handle = start->handle;
5937 sev->fd = argp->sev_fd;
5938
5939e_free_session:
5940 kfree(session_blob);
5941e_free_dh:
5942 kfree(dh_blob);
5943e_free:
5944 kfree(start);
5945 return ret;
5946}
5947
89c50580
BS
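/*
 * Count how many of the pinned pages, starting at inpages[idx], are
 * physically contiguous (including inpages[idx] itself), so that callers
 * can cover the largest possible region with a single SEV command.
 */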
5948static int get_num_contig_pages(int idx, struct page **inpages,
5949 unsigned long npages)
5950{
5951 unsigned long paddr, next_paddr;
5952 int i = idx + 1, pages = 1;
5953
5954 /* find the number of contiguous pages starting from idx */
5955 paddr = __sme_page_pa(inpages[idx]);
5956 while (i < npages) {
5957 next_paddr = __sme_page_pa(inpages[i++]);
5958 if ((paddr + PAGE_SIZE) == next_paddr) {
5959 pages++;
5960 paddr = next_paddr;
5961 continue;
5962 }
5963 break;
5964 }
5965
5966 return pages;
5967}
5968
5969static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
5970{
5971 unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
5972 struct kvm_sev_info *sev = &kvm->arch.sev_info;
5973 struct kvm_sev_launch_update_data params;
5974 struct sev_data_launch_update_data *data;
5975 struct page **inpages;
5976 int i, ret, pages;
5977
5978 if (!sev_guest(kvm))
5979 return -ENOTTY;
5980
5981 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
5982 return -EFAULT;
5983
5984 data = kzalloc(sizeof(*data), GFP_KERNEL);
5985 if (!data)
5986 return -ENOMEM;
5987
5988 vaddr = params.uaddr;
5989 size = params.len;
5990 vaddr_end = vaddr + size;
5991
5992 /* Lock the user memory. */
5993 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
5994 if (!inpages) {
5995 ret = -ENOMEM;
5996 goto e_free;
5997 }
5998
5999 /*
6000 * The LAUNCH_UPDATE command will perform in-place encryption of the
6001 * memory content (i.e it will write the same memory region with C=1).
6002 * It's possible that the cache may contain the data with C=0, i.e.,
6003 * unencrypted so invalidate it first.
6004 */
6005 sev_clflush_pages(inpages, npages);
6006
6007 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6008 int offset, len;
6009
6010 /*
6011 * If the user buffer is not page-aligned, calculate the offset
6012 * within the page.
6013 */
6014 offset = vaddr & (PAGE_SIZE - 1);
6015
6016 /* Calculate the number of pages that can be encrypted in one go. */
6017 pages = get_num_contig_pages(i, inpages, npages);
6018
6019 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6020
6021 data->handle = sev->handle;
6022 data->len = len;
6023 data->address = __sme_page_pa(inpages[i]) + offset;
6024 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6025 if (ret)
6026 goto e_unpin;
6027
6028 size -= len;
6029 next_vaddr = vaddr + len;
6030 }
6031
6032e_unpin:
6033 /* content of memory is updated, mark pages dirty */
6034 for (i = 0; i < npages; i++) {
6035 set_page_dirty_lock(inpages[i]);
6036 mark_page_accessed(inpages[i]);
6037 }
6038 /* unlock the user pages */
6039 sev_unpin_memory(kvm, inpages, npages);
6040e_free:
6041 kfree(data);
6042 return ret;
6043}
6044
0d0736f7
BS
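/*
 * KVM_SEV_LAUNCH_MEASURE: fetch the launch measurement from the firmware.
 * With params.len == 0 this only queries the required blob length, which is
 * copied back to userspace.
 */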
6045static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6046{
6047 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6048 struct sev_data_launch_measure *data;
6049 struct kvm_sev_launch_measure params;
6050 void *blob = NULL;
6051 int ret;
6052
6053 if (!sev_guest(kvm))
6054 return -ENOTTY;
6055
6056 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6057 return -EFAULT;
6058
6059 data = kzalloc(sizeof(*data), GFP_KERNEL);
6060 if (!data)
6061 return -ENOMEM;
6062
6063 /* User wants to query the blob length */
6064 if (!params.len)
6065 goto cmd;
6066
6067 if (params.uaddr) {
6068 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6069 ret = -EINVAL;
6070 goto e_free;
6071 }
6072
6073 if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
6074 ret = -EFAULT;
6075 goto e_free;
6076 }
6077
6078 ret = -ENOMEM;
6079 blob = kmalloc(params.len, GFP_KERNEL);
6080 if (!blob)
6081 goto e_free;
6082
6083 data->address = __psp_pa(blob);
6084 data->len = params.len;
6085 }
6086
6087cmd:
6088 data->handle = sev->handle;
6089 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6090
6091 /*
6092	 * If userspace only queried the blob length, the firmware has already reported it in data->len.
6093 */
6094 if (!params.len)
6095 goto done;
6096
6097 if (ret)
6098 goto e_free_blob;
6099
6100 if (blob) {
6101 if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
6102 ret = -EFAULT;
6103 }
6104
6105done:
6106 params.len = data->len;
6107 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6108 ret = -EFAULT;
6109e_free_blob:
6110 kfree(blob);
6111e_free:
6112 kfree(data);
6113 return ret;
6114}
6115
5bdb0e2f
BS
6116static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6117{
6118 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6119 struct sev_data_launch_finish *data;
6120 int ret;
6121
6122 if (!sev_guest(kvm))
6123 return -ENOTTY;
6124
6125 data = kzalloc(sizeof(*data), GFP_KERNEL);
6126 if (!data)
6127 return -ENOMEM;
6128
6129 data->handle = sev->handle;
6130 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6131
6132 kfree(data);
6133 return ret;
6134}
6135
255d9e75
BS
6136static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6137{
6138 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6139 struct kvm_sev_guest_status params;
6140 struct sev_data_guest_status *data;
6141 int ret;
6142
6143 if (!sev_guest(kvm))
6144 return -ENOTTY;
6145
6146 data = kzalloc(sizeof(*data), GFP_KERNEL);
6147 if (!data)
6148 return -ENOMEM;
6149
6150 data->handle = sev->handle;
6151 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6152 if (ret)
6153 goto e_free;
6154
6155 params.policy = data->policy;
6156 params.state = data->state;
6157 params.handle = data->handle;
6158
6159 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6160 ret = -EFAULT;
6161e_free:
6162 kfree(data);
6163 return ret;
6164}
6165
24f41fb2
BS
6166static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6167 unsigned long dst, int size,
6168 int *error, bool enc)
6169{
6170 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6171 struct sev_data_dbg *data;
6172 int ret;
6173
6174 data = kzalloc(sizeof(*data), GFP_KERNEL);
6175 if (!data)
6176 return -ENOMEM;
6177
6178 data->handle = sev->handle;
6179 data->dst_addr = dst;
6180 data->src_addr = src;
6181 data->len = size;
6182
6183 ret = sev_issue_cmd(kvm,
6184 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6185 data, error);
6186 kfree(data);
6187 return ret;
6188}
6189
6190static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6191 unsigned long dst_paddr, int sz, int *err)
6192{
6193 int offset;
6194
6195 /*
6196	 * It's safe to read more than we were asked for; the caller must ensure
6197	 * that the destination has enough space.
6198 */
6199 src_paddr = round_down(src_paddr, 16);
6200 offset = src_paddr & 15;
6201 sz = round_up(sz + offset, 16);
6202
6203 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6204}
6205
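/*
 * Decrypt guest memory into a userspace buffer. The firmware works on
 * 16-byte aligned addresses and lengths, so unaligned requests are bounced
 * through a temporary page and the requested bytes are copied out to
 * userspace afterwards.
 */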
6206static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6207 unsigned long __user dst_uaddr,
6208 unsigned long dst_paddr,
6209 int size, int *err)
6210{
6211 struct page *tpage = NULL;
6212 int ret, offset;
6213
6214	/* If the inputs are not 16-byte aligned, use an intermediate buffer. */
6215 if (!IS_ALIGNED(dst_paddr, 16) ||
6216 !IS_ALIGNED(paddr, 16) ||
6217 !IS_ALIGNED(size, 16)) {
6218 tpage = (void *)alloc_page(GFP_KERNEL);
6219 if (!tpage)
6220 return -ENOMEM;
6221
6222 dst_paddr = __sme_page_pa(tpage);
6223 }
6224
6225 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6226 if (ret)
6227 goto e_free;
6228
6229 if (tpage) {
6230 offset = paddr & 15;
6231 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6232 page_address(tpage) + offset, size))
6233 ret = -EFAULT;
6234 }
6235
6236e_free:
6237 if (tpage)
6238 __free_page(tpage);
6239
6240 return ret;
6241}
6242
7d1594f5
BS
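/*
 * Encrypt userspace-provided data into guest memory. An unaligned source is
 * staged in a temporary page; an unaligned destination or length is handled
 * as a read-modify-write (decrypt, merge, re-encrypt), see the comments
 * below.
 */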
6243static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6244 unsigned long __user vaddr,
6245 unsigned long dst_paddr,
6246 unsigned long __user dst_vaddr,
6247 int size, int *error)
6248{
6249 struct page *src_tpage = NULL;
6250 struct page *dst_tpage = NULL;
6251 int ret, len = size;
6252
6253 /* If source buffer is not aligned then use an intermediate buffer */
6254 if (!IS_ALIGNED(vaddr, 16)) {
6255 src_tpage = alloc_page(GFP_KERNEL);
6256 if (!src_tpage)
6257 return -ENOMEM;
6258
6259 if (copy_from_user(page_address(src_tpage),
6260 (void __user *)(uintptr_t)vaddr, size)) {
6261 __free_page(src_tpage);
6262 return -EFAULT;
6263 }
6264
6265 paddr = __sme_page_pa(src_tpage);
6266 }
6267
6268 /*
6269 * If destination buffer or length is not aligned then do read-modify-write:
6270 * - decrypt destination in an intermediate buffer
6271 * - copy the source buffer in an intermediate buffer
6272 * - use the intermediate buffer as source buffer
6273 */
6274 if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6275 int dst_offset;
6276
6277 dst_tpage = alloc_page(GFP_KERNEL);
6278 if (!dst_tpage) {
6279 ret = -ENOMEM;
6280 goto e_free;
6281 }
6282
6283 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6284 __sme_page_pa(dst_tpage), size, error);
6285 if (ret)
6286 goto e_free;
6287
6288 /*
6289		 * If the source was already staged in a kernel buffer (src_tpage),
6290		 * use memcpy(); otherwise copy directly from userspace with copy_from_user().
6291 */
6292 dst_offset = dst_paddr & 15;
6293
6294 if (src_tpage)
6295 memcpy(page_address(dst_tpage) + dst_offset,
6296 page_address(src_tpage), size);
6297 else {
6298 if (copy_from_user(page_address(dst_tpage) + dst_offset,
6299 (void __user *)(uintptr_t)vaddr, size)) {
6300 ret = -EFAULT;
6301 goto e_free;
6302 }
6303 }
6304
6305 paddr = __sme_page_pa(dst_tpage);
6306 dst_paddr = round_down(dst_paddr, 16);
6307 len = round_up(size, 16);
6308 }
6309
6310 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6311
6312e_free:
6313 if (src_tpage)
6314 __free_page(src_tpage);
6315 if (dst_tpage)
6316 __free_page(dst_tpage);
6317 return ret;
6318}
6319
24f41fb2
BS
6320static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6321{
6322 unsigned long vaddr, vaddr_end, next_vaddr;
6323 unsigned long dst_vaddr, dst_vaddr_end;
6324 struct page **src_p, **dst_p;
6325 struct kvm_sev_dbg debug;
6326 unsigned long n;
6327 int ret, size;
6328
6329 if (!sev_guest(kvm))
6330 return -ENOTTY;
6331
6332 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6333 return -EFAULT;
6334
6335 vaddr = debug.src_uaddr;
6336 size = debug.len;
6337 vaddr_end = vaddr + size;
6338 dst_vaddr = debug.dst_uaddr;
6339 dst_vaddr_end = dst_vaddr + size;
6340
6341 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6342 int len, s_off, d_off;
6343
6344 /* lock userspace source and destination page */
6345 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6346 if (!src_p)
6347 return -EFAULT;
6348
6349 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6350 if (!dst_p) {
6351 sev_unpin_memory(kvm, src_p, n);
6352 return -EFAULT;
6353 }
6354
6355 /*
6356 * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6357		 * memory content (i.e. it will write the same memory region with C=1).
6358		 * It's possible that the cache may contain the data with C=0, i.e.,
6359		 * unencrypted, so invalidate it first.
6360 */
6361 sev_clflush_pages(src_p, 1);
6362 sev_clflush_pages(dst_p, 1);
6363
6364 /*
6365		 * Since the user buffers may not be page aligned, calculate the
6366		 * offsets within the page.
6367 */
6368 s_off = vaddr & ~PAGE_MASK;
6369 d_off = dst_vaddr & ~PAGE_MASK;
6370 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6371
7d1594f5
BS
6372 if (dec)
6373 ret = __sev_dbg_decrypt_user(kvm,
6374 __sme_page_pa(src_p[0]) + s_off,
6375 dst_vaddr,
6376 __sme_page_pa(dst_p[0]) + d_off,
6377 len, &argp->error);
6378 else
6379 ret = __sev_dbg_encrypt_user(kvm,
6380 __sme_page_pa(src_p[0]) + s_off,
6381 vaddr,
6382 __sme_page_pa(dst_p[0]) + d_off,
6383 dst_vaddr,
6384 len, &argp->error);
24f41fb2
BS
6385
6386 sev_unpin_memory(kvm, src_p, 1);
6387 sev_unpin_memory(kvm, dst_p, 1);
6388
6389 if (ret)
6390 goto err;
6391
6392 next_vaddr = vaddr + len;
6393 dst_vaddr = dst_vaddr + len;
6394 size -= len;
6395 }
6396err:
6397 return ret;
6398}
6399
9f5b5b95
BS
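/*
 * KVM_SEV_LAUNCH_SECRET: hand the firmware a transport-encrypted secret
 * (packet header + data blob) so it can be installed into the pinned,
 * physically contiguous guest memory region described by userspace.
 */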
6400static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6401{
6402 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6403 struct sev_data_launch_secret *data;
6404 struct kvm_sev_launch_secret params;
6405 struct page **pages;
6406 void *blob, *hdr;
6407 unsigned long n;
6408	int ret, offset;
6409
6410 if (!sev_guest(kvm))
6411 return -ENOTTY;
6412
6413 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6414 return -EFAULT;
6415
6416 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6417 if (!pages)
6418 return -ENOMEM;
6419
6420 /*
6421	 * The secret must be copied into a physically contiguous region, so
6422	 * verify that the userspace pages are contiguous before issuing the command.
6423 */
6424 if (get_num_contig_pages(0, pages, n) != n) {
6425 ret = -EINVAL;
6426 goto e_unpin_memory;
6427 }
6428
6429 ret = -ENOMEM;
6430 data = kzalloc(sizeof(*data), GFP_KERNEL);
6431 if (!data)
6432 goto e_unpin_memory;
6433
6434 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6435 if (IS_ERR(blob)) {
6436 ret = PTR_ERR(blob);
6437 goto e_free;
6438 }
6439
6440 data->trans_address = __psp_pa(blob);
6441 data->trans_len = params.trans_len;
6442
6443 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6444 if (IS_ERR(hdr)) {
6445 ret = PTR_ERR(hdr);
6446 goto e_free_blob;
6447 }
6448	data->hdr_address = __psp_pa(hdr);
6449	data->hdr_len = params.hdr_len;
6450
	/* Program the pinned guest memory region that will receive the secret. */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

6451	data->handle = sev->handle;
6452 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6453
6454 kfree(hdr);
6455
6456e_free_blob:
6457 kfree(blob);
6458e_free:
6459 kfree(data);
6460e_unpin_memory:
6461 sev_unpin_memory(kvm, pages, n);
6462 return ret;
6463}
6464
1654efcb
BS
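/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl. As a rough,
 * illustrative sketch (not part of this file), userspace would reach the
 * handlers below along these lines, with vm_fd and sev_fd being the VM and
 * /dev/sev file descriptors it already holds:
 *
 *	struct kvm_sev_launch_secret secret = { ... };
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_LAUNCH_SECRET,
 *		.data   = (__u64)(uintptr_t)&secret,
 *		.sev_fd = sev_fd,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * On return, cmd.error carries the SEV firmware status code for the command.
 */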
6465static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6466{
6467 struct kvm_sev_cmd sev_cmd;
6468 int r;
6469
6470 if (!svm_sev_enabled())
6471 return -ENOTTY;
6472
6473 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6474 return -EFAULT;
6475
6476 mutex_lock(&kvm->lock);
6477
6478 switch (sev_cmd.id) {
6479 case KVM_SEV_INIT:
6480 r = sev_guest_init(kvm, &sev_cmd);
6481 break;
59414c98
BS
6482 case KVM_SEV_LAUNCH_START:
6483 r = sev_launch_start(kvm, &sev_cmd);
6484 break;
89c50580
BS
6485 case KVM_SEV_LAUNCH_UPDATE_DATA:
6486 r = sev_launch_update_data(kvm, &sev_cmd);
6487 break;
0d0736f7
BS
6488 case KVM_SEV_LAUNCH_MEASURE:
6489 r = sev_launch_measure(kvm, &sev_cmd);
6490 break;
5bdb0e2f
BS
6491 case KVM_SEV_LAUNCH_FINISH:
6492 r = sev_launch_finish(kvm, &sev_cmd);
6493 break;
255d9e75
BS
6494 case KVM_SEV_GUEST_STATUS:
6495 r = sev_guest_status(kvm, &sev_cmd);
6496 break;
24f41fb2
BS
6497 case KVM_SEV_DBG_DECRYPT:
6498 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6499 break;
7d1594f5
BS
6500 case KVM_SEV_DBG_ENCRYPT:
6501 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6502 break;
9f5b5b95
BS
6503 case KVM_SEV_LAUNCH_SECRET:
6504 r = sev_launch_secret(kvm, &sev_cmd);
6505 break;
1654efcb
BS
6506 default:
6507 r = -EINVAL;
6508 goto out;
6509 }
6510
6511 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
6512 r = -EFAULT;
6513
6514out:
6515 mutex_unlock(&kvm->lock);
6516 return r;
6517}
6518
404f6aac 6519static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
6aa8b732
AK
6520 .cpu_has_kvm_support = has_svm,
6521 .disabled_by_bios = is_disabled,
6522 .hardware_setup = svm_hardware_setup,
6523 .hardware_unsetup = svm_hardware_unsetup,
002c7f7c 6524 .check_processor_compatibility = svm_check_processor_compat,
6aa8b732
AK
6525 .hardware_enable = svm_hardware_enable,
6526 .hardware_disable = svm_hardware_disable,
774ead3a 6527 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
6d396b55 6528 .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
6aa8b732
AK
6529
6530 .vcpu_create = svm_create_vcpu,
6531 .vcpu_free = svm_free_vcpu,
04d2cc77 6532 .vcpu_reset = svm_vcpu_reset,
6aa8b732 6533
44a95dae 6534 .vm_init = avic_vm_init,
1654efcb 6535 .vm_destroy = svm_vm_destroy,
44a95dae 6536
04d2cc77 6537 .prepare_guest_switch = svm_prepare_guest_switch,
6aa8b732
AK
6538 .vcpu_load = svm_vcpu_load,
6539 .vcpu_put = svm_vcpu_put,
8221c137
SS
6540 .vcpu_blocking = svm_vcpu_blocking,
6541 .vcpu_unblocking = svm_vcpu_unblocking,
6aa8b732 6542
a96036b8 6543 .update_bp_intercept = update_bp_intercept,
6aa8b732
AK
6544 .get_msr = svm_get_msr,
6545 .set_msr = svm_set_msr,
6546 .get_segment_base = svm_get_segment_base,
6547 .get_segment = svm_get_segment,
6548 .set_segment = svm_set_segment,
2e4d2653 6549 .get_cpl = svm_get_cpl,
1747fb71 6550 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
e8467fda 6551 .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
aff48baa 6552 .decache_cr3 = svm_decache_cr3,
25c4c276 6553 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
6aa8b732 6554 .set_cr0 = svm_set_cr0,
6aa8b732
AK
6555 .set_cr3 = svm_set_cr3,
6556 .set_cr4 = svm_set_cr4,
6557 .set_efer = svm_set_efer,
6558 .get_idt = svm_get_idt,
6559 .set_idt = svm_set_idt,
6560 .get_gdt = svm_get_gdt,
6561 .set_gdt = svm_set_gdt,
73aaf249
JK
6562 .get_dr6 = svm_get_dr6,
6563 .set_dr6 = svm_set_dr6,
020df079 6564 .set_dr7 = svm_set_dr7,
facb0139 6565 .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
6de4f3ad 6566 .cache_reg = svm_cache_reg,
6aa8b732
AK
6567 .get_rflags = svm_get_rflags,
6568 .set_rflags = svm_set_rflags,
be94f6b7 6569
6aa8b732 6570 .tlb_flush = svm_flush_tlb,
6aa8b732 6571
6aa8b732 6572 .run = svm_vcpu_run,
04d2cc77 6573 .handle_exit = handle_exit,
6aa8b732 6574 .skip_emulated_instruction = skip_emulated_instruction,
2809f5d2
GC
6575 .set_interrupt_shadow = svm_set_interrupt_shadow,
6576 .get_interrupt_shadow = svm_get_interrupt_shadow,
102d8325 6577 .patch_hypercall = svm_patch_hypercall,
2a8067f1 6578 .set_irq = svm_set_irq,
95ba8273 6579 .set_nmi = svm_inject_nmi,
298101da 6580 .queue_exception = svm_queue_exception,
b463a6f7 6581 .cancel_injection = svm_cancel_injection,
78646121 6582 .interrupt_allowed = svm_interrupt_allowed,
95ba8273 6583 .nmi_allowed = svm_nmi_allowed,
3cfc3092
JK
6584 .get_nmi_mask = svm_get_nmi_mask,
6585 .set_nmi_mask = svm_set_nmi_mask,
95ba8273
GN
6586 .enable_nmi_window = enable_nmi_window,
6587 .enable_irq_window = enable_irq_window,
6588 .update_cr8_intercept = update_cr8_intercept,
8d14695f 6589 .set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
d62caabb
AS
6590 .get_enable_apicv = svm_get_enable_apicv,
6591 .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
c7c9c56c 6592 .load_eoi_exitmap = svm_load_eoi_exitmap,
44a95dae
SS
6593 .hwapic_irr_update = svm_hwapic_irr_update,
6594 .hwapic_isr_update = svm_hwapic_isr_update,
be8ca170 6595 .apicv_post_state_restore = avic_post_state_restore,
cbc94022
IE
6596
6597 .set_tss_addr = svm_set_tss_addr,
67253af5 6598 .get_tdp_level = get_npt_level,
4b12f0de 6599 .get_mt_mask = svm_get_mt_mask,
229456fc 6600
586f9607 6601 .get_exit_info = svm_get_exit_info,
586f9607 6602
17cc3935 6603 .get_lpage_level = svm_get_lpage_level,
0e851880
SY
6604
6605 .cpuid_update = svm_cpuid_update,
4e47c7a6
SY
6606
6607 .rdtscp_supported = svm_rdtscp_supported,
ad756a16 6608 .invpcid_supported = svm_invpcid_supported,
93c4adc7 6609 .mpx_supported = svm_mpx_supported,
55412b2e 6610 .xsaves_supported = svm_xsaves_supported,
d4330ef2
JR
6611
6612 .set_supported_cpuid = svm_set_supported_cpuid,
f5f48ee1
SY
6613
6614 .has_wbinvd_exit = svm_has_wbinvd_exit,
99e3e30a
ZA
6615
6616 .write_tsc_offset = svm_write_tsc_offset,
1c97f0a0
JR
6617
6618 .set_tdp_cr3 = set_tdp_cr3,
8a76d7f2
JR
6619
6620 .check_intercept = svm_check_intercept,
a547c6db 6621 .handle_external_intr = svm_handle_external_intr,
ae97a3b8
RK
6622
6623 .sched_in = svm_sched_in,
25462f7f
WH
6624
6625 .pmu_ops = &amd_pmu_ops,
340d3bc3 6626 .deliver_posted_interrupt = svm_deliver_avic_intr,
411b44ba 6627 .update_pi_irte = svm_update_pi_irte,
74f16909 6628 .setup_mce = svm_setup_mce,
0234bf88 6629
72d7b374 6630 .smi_allowed = svm_smi_allowed,
0234bf88
LP
6631 .pre_enter_smm = svm_pre_enter_smm,
6632 .pre_leave_smm = svm_pre_leave_smm,
cc3d967f 6633 .enable_smi_window = enable_smi_window,
1654efcb
BS
6634
6635 .mem_enc_op = svm_mem_enc_op,
6aa8b732
AK
6636};
6637
6638static int __init svm_init(void)
6639{
cb498ea2 6640 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
0ee75bea 6641 __alignof__(struct vcpu_svm), THIS_MODULE);
6aa8b732
AK
6642}
6643
6644static void __exit svm_exit(void)
6645{
cb498ea2 6646 kvm_exit();
6aa8b732
AK
6647}
6648
6649module_init(svm_init)
6650module_exit(svm_exit)