KVM: SVM: ensure that EFER.SVME is set when running nested guest or on nested vmexit
arch/x86/kvm/svm/nested.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>

#include <asm/msr-index.h>
#include <asm/debugreg.h>

#include "kvm_emulate.h"
#include "trace.h"
#include "mmu.h"
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "svm.h"

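/*
 * Reflect a #NPF that KVM's MMU wants to inject into L1 as a nested
 * vmexit.  If the current exit is not already an NPF exit, synthesize
 * one from @fault before forwarding it.
 */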
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
                                       struct x86_exception *fault)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
                /*
                 * TODO: track the cause of the nested page fault, and
                 * correctly fill in the high bits of exit_info_1.
                 */
                svm->vmcb->control.exit_code = SVM_EXIT_NPF;
                svm->vmcb->control.exit_code_hi = 0;
                svm->vmcb->control.exit_info_1 = (1ULL << 32);
                svm->vmcb->control.exit_info_2 = fault->address;
        }

        svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
        svm->vmcb->control.exit_info_1 |= fault->error_code;

        nested_svm_vmexit(svm);
}

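/*
 * Read a PDPTE from the nested page table rooted at nested_cr3; a failed
 * read yields 0, i.e. a not-present entry.
 */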
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr3 = svm->nested.ctl.nested_cr3;
        u64 pdpte;
        int ret;

        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
                                       offset_in_page(cr3) + index * 8, 8);
        if (ret)
                return 0;
        return pdpte;
}

static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        return svm->nested.ctl.nested_cr3;
}

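/*
 * Switch to the nested MMU: shadow the NPT that L1 set up for L2
 * (rooted at nested_cr3) and walk L2 addresses through nested_mmu.
 */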
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *hsave = svm->nested.hsave;

        WARN_ON(mmu_is_nested(vcpu));

        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
                                svm->nested.ctl.nested_cr3);
        vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
        vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mmu = &vcpu->arch.root_mmu;
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

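/*
 * Compute the effective intercepts for vmcb02 as the union of L1's and
 * L2's intercepts, minus the few exceptions handled below.
 */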
void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h, *g;
        unsigned int i;

        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested.ctl;

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] = h->intercepts[i];

        if (g->int_ctl & V_INTR_MASKING_MASK) {
                /* We only want the cr8 intercept bits of L1 */
                vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
                vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);

                /*
                 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
                 * affect any interrupt we may want to inject; therefore,
                 * interrupt window vmexits are irrelevant to L0.
                 */
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }

        /* We don't want to see VMMCALLs from a nested guest */
        vmcb_clr_intercept(c, INTERCEPT_VMMCALL);

        for (i = 0; i < MAX_INTERCEPT; i++)
                c->intercepts[i] |= g->intercepts[i];
}

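/*
 * Copy the control fields that nested SVM tracks; asid is deliberately
 * skipped (see the comment in the body).
 */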
static void copy_vmcb_control_area(struct vmcb_control_area *dst,
                                   struct vmcb_control_area *from)
{
        unsigned int i;

        for (i = 0; i < MAX_INTERCEPT; i++)
                dst->intercepts[i] = from->intercepts[i];

        dst->iopm_base_pa         = from->iopm_base_pa;
        dst->msrpm_base_pa        = from->msrpm_base_pa;
        dst->tsc_offset           = from->tsc_offset;
        /* asid not copied, it is handled manually for svm->vmcb.  */
        dst->tlb_ctl              = from->tlb_ctl;
        dst->int_ctl              = from->int_ctl;
        dst->int_vector           = from->int_vector;
        dst->int_state            = from->int_state;
        dst->exit_code            = from->exit_code;
        dst->exit_code_hi         = from->exit_code_hi;
        dst->exit_info_1          = from->exit_info_1;
        dst->exit_info_2          = from->exit_info_2;
        dst->exit_int_info        = from->exit_int_info;
        dst->exit_int_info_err    = from->exit_int_info_err;
        dst->nested_ctl           = from->nested_ctl;
        dst->event_inj            = from->event_inj;
        dst->event_inj_err        = from->event_inj_err;
        dst->nested_cr3           = from->nested_cr3;
        dst->virt_ext             = from->virt_ext;
        dst->pause_filter_count   = from->pause_filter_count;
        dst->pause_filter_thresh  = from->pause_filter_thresh;
}

static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
        /*
         * This function merges the msr permission bitmaps of kvm and the
         * nested vmcb. It is optimized in that it only merges the parts
         * where the kvm msr permission bitmap may contain zero bits.
         */
        int i;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return true;

        for (i = 0; i < MSRPM_OFFSETS; i++) {
                u32 value, p;
                u64 offset;

                if (msrpm_offsets[i] == 0xffffffff)
                        break;

                p      = msrpm_offsets[i];
                offset = svm->nested.ctl.msrpm_base_pa + (p * 4);

                if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
                        return false;

                svm->nested.msrpm[p] = svm->msrpm[p] | value;
        }

        svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));

        return true;
}

static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
                        KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
                return false;
        }

        return true;
}

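/*
 * Consistency checks on vmcb12's control area; a failure here makes the
 * caller report SVM_EXIT_ERR to L1.
 */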
static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
{
        if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
                return false;

        if (control->asid == 0)
                return false;

        if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
            !npt_enabled)
                return false;

        return true;
}

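/*
 * Consistency checks on vmcb12's save area, mirroring the checks that
 * hardware performs on VMRUN.
 */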
static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        bool vmcb12_lma;

        /*
         * FIXME: these should be done after copying the fields,
         * to avoid TOC/TOU races.  For these save area checks
         * the possible damage is limited since kvm_set_cr0 and
         * kvm_set_cr4 handle failure; EFER_SVME is an exception
         * so it is force-set later in nested_prepare_vmcb_save.
         */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;

        if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
                return false;

        if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
                return false;

        vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);

        if (!vmcb12_lma) {
                if (vmcb12->save.cr4 & X86_CR4_PAE) {
                        if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
                                return false;
                } else {
                        if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
                                return false;
                }
        } else {
                if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
                    !(vmcb12->save.cr0 & X86_CR0_PE) ||
                    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
                        return false;
        }
        if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;

        return true;
}

static void load_nested_vmcb_control(struct vcpu_svm *svm,
                                     struct vmcb_control_area *control)
{
        copy_vmcb_control_area(&svm->nested.ctl, control);

        /* Copy it here because nested_vmcb_check_controls will check it.  */
        svm->nested.ctl.asid           = control->asid;
        svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
        svm->nested.ctl.iopm_base_pa  &= ~0x0fffULL;
}

/*
 * Synchronize fields that are written by the processor, so that
 * they can be copied back into the nested_vmcb.
 */
void sync_nested_vmcb_control(struct vcpu_svm *svm)
{
        u32 mask;

        svm->nested.ctl.event_inj      = svm->vmcb->control.event_inj;
        svm->nested.ctl.event_inj_err  = svm->vmcb->control.event_inj_err;

        /* Only a few fields of int_ctl are written by the processor.  */
        mask = V_IRQ_MASK | V_TPR_MASK;
        if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
            svm_is_intercept(svm, INTERCEPT_VINTR)) {
                /*
                 * In order to request an interrupt window, L0 is usurping
                 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
                 * even if it was clear in L1's VMCB.  Restoring it would be
                 * wrong.  However, in this case V_IRQ will remain true until
                 * interrupt_window_interception calls svm_clear_vintr and
                 * restores int_ctl.  We can just leave it aside.
                 */
                mask &= ~V_IRQ_MASK;
        }
        svm->nested.ctl.int_ctl        &= ~mask;
        svm->nested.ctl.int_ctl        |= svm->vmcb->control.int_ctl & mask;
}

/*
 * Transfer any event that L0 or L1 wanted to inject into L2 to
 * EXIT_INT_INFO.
 */
static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
                                           struct vmcb *vmcb12)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        u32 exit_int_info = 0;
        unsigned int nr;

        if (vcpu->arch.exception.injected) {
                nr = vcpu->arch.exception.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;

                if (vcpu->arch.exception.has_error_code) {
                        exit_int_info |= SVM_EVTINJ_VALID_ERR;
                        vmcb12->control.exit_int_info_err =
                                vcpu->arch.exception.error_code;
                }

        } else if (vcpu->arch.nmi_injected) {
                exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

        } else if (vcpu->arch.interrupt.injected) {
                nr = vcpu->arch.interrupt.nr;
                exit_int_info = nr | SVM_EVTINJ_VALID;

                if (vcpu->arch.interrupt.soft)
                        exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
                else
                        exit_int_info |= SVM_EVTINJ_TYPE_INTR;
        }

        vmcb12->control.exit_int_info = exit_int_info;
}

static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
        return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}

/*
 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
 * if we are emulating VM-Entry into a guest with NPT enabled.
 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
                               bool nested_npt)
{
        if (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))
                return -EINVAL;

        if (!nested_npt && is_pae_paging(vcpu) &&
            (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
                if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
                        return -EINVAL;
        }

        /*
         * TODO: optimize unconditional TLB flush/MMU sync here and in
         * kvm_init_shadow_npt_mmu().
         */
        if (!nested_npt)
                kvm_mmu_new_pgd(vcpu, cr3, false, false);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);

        kvm_init_mmu(vcpu, false);

        return 0;
}

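/* Load vmcb12's save area into vmcb02 and the vcpu's architectural state. */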
static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
        /* Load the nested guest state */
        svm->vmcb->save.es = vmcb12->save.es;
        svm->vmcb->save.cs = vmcb12->save.cs;
        svm->vmcb->save.ss = vmcb12->save.ss;
        svm->vmcb->save.ds = vmcb12->save.ds;
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);

        /*
         * Force-set EFER_SVME even though it is checked earlier on the
         * VMCB12, because the guest can flip the bit between the check
         * and now.  Clearing EFER_SVME would call svm_free_nested.
         */
        svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);

        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
        kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
        kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
        kvm_rip_write(&svm->vcpu, vmcb12->save.rip);

        /* In case we don't even reach vcpu_run, the fields are not updated */
        svm->vmcb->save.rax = vmcb12->save.rax;
        svm->vmcb->save.rsp = vmcb12->save.rsp;
        svm->vmcb->save.rip = vmcb12->save.rip;
        svm->vmcb->save.dr7 = vmcb12->save.dr7;
        svm->vcpu.arch.dr6  = vmcb12->save.dr6;
        svm->vmcb->save.cpl = vmcb12->save.cpl;
}

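/*
 * Set up vmcb02's control area for L2: int_ctl merges vmcb12's bits with
 * L1's interrupt-masking and vGIF bits, and the TSC offset is the sum of
 * L1's and vmcb12's offsets.
 */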
static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
{
        const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;

        if (nested_npt_enabled(svm))
                nested_svm_init_mmu_context(&svm->vcpu);

        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
                svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;

        svm->vmcb->control.int_ctl             =
                (svm->nested.ctl.int_ctl & ~mask) |
                (svm->nested.hsave->control.int_ctl & mask);

        svm->vmcb->control.virt_ext            = svm->nested.ctl.virt_ext;
        svm->vmcb->control.int_vector          = svm->nested.ctl.int_vector;
        svm->vmcb->control.int_state           = svm->nested.ctl.int_state;
        svm->vmcb->control.event_inj           = svm->nested.ctl.event_inj;
        svm->vmcb->control.event_inj_err       = svm->nested.ctl.event_inj_err;

        svm->vmcb->control.pause_filter_count  = svm->nested.ctl.pause_filter_count;
        svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;

        /* Enter Guest-Mode */
        enter_guest_mode(&svm->vcpu);

        /*
         * Merge guest and host intercepts - must be called with the vcpu in
         * guest mode to take effect here.
         */
        recalc_intercepts(svm);

        vmcb_mark_all_dirty(svm->vmcb);
}

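/*
 * Switch the vcpu into guest (L2) mode by loading vmcb12's save and
 * control state and L2's CR3.
 */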
int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
                         struct vmcb *vmcb12)
{
        int ret;

        svm->nested.vmcb12_gpa = vmcb12_gpa;
        nested_prepare_vmcb_save(svm, vmcb12);
        nested_prepare_vmcb_control(svm);

        ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
                                  nested_npt_enabled(svm));
        if (ret)
                return ret;

        svm_set_gif(svm, true);

        return 0;
}

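/*
 * Emulate VMRUN: map vmcb12, validate it, save L1 state into hsave and
 * enter guest mode.  Failures after the consistency checks are reported
 * to L1 as a #VMEXIT with SVM_EXIT_ERR.
 */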
int nested_svm_vmrun(struct vcpu_svm *svm)
{
        int ret;
        struct vmcb *vmcb12;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;
        u64 vmcb12_gpa;

        if (is_smm(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        vmcb12_gpa = svm->vmcb->save.rax;
        ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
        if (ret == -EINVAL) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else if (ret) {
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }

        ret = kvm_skip_emulated_instruction(&svm->vcpu);

        vmcb12 = map.hva;

        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;

        load_nested_vmcb_control(svm, &vmcb12->control);

        if (!nested_vmcb_check_save(svm, vmcb12) ||
            !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
                vmcb12->control.exit_info_2  = 0;
                goto out;
        }

        trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
                               vmcb12->save.rip,
                               vmcb12->control.int_ctl,
                               vmcb12->control.event_inj,
                               vmcb12->control.nested_ctl);

        trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
                                    vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
                                    vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
                                    vmcb12->control.intercepts[INTERCEPT_WORD3],
                                    vmcb12->control.intercepts[INTERCEPT_WORD4],
                                    vmcb12->control.intercepts[INTERCEPT_WORD5]);

        /* Clear internal status */
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        /*
         * Save the old vmcb, so we don't need to pick what we save, but can
         * restore everything when a VMEXIT occurs
         */
        hsave->save.es     = vmcb->save.es;
        hsave->save.cs     = vmcb->save.cs;
        hsave->save.ss     = vmcb->save.ss;
        hsave->save.ds     = vmcb->save.ds;
        hsave->save.gdtr   = vmcb->save.gdtr;
        hsave->save.idtr   = vmcb->save.idtr;
        hsave->save.efer   = svm->vcpu.arch.efer;
        hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
        hsave->save.cr4    = svm->vcpu.arch.cr4;
        hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
        hsave->save.rip    = kvm_rip_read(&svm->vcpu);
        hsave->save.rsp    = vmcb->save.rsp;
        hsave->save.rax    = vmcb->save.rax;
        if (npt_enabled)
                hsave->save.cr3    = vmcb->save.cr3;
        else
                hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);

        copy_vmcb_control_area(&hsave->control, &vmcb->control);

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

out_exit_err:
        svm->nested.nested_run_pending = 0;

        svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1  = 0;
        svm->vmcb->control.exit_info_2  = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        return ret;
}

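/* Copy the state touched by VMLOAD/VMSAVE from one VMCB to the other. */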
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
        to_vmcb->save.fs = from_vmcb->save.fs;
        to_vmcb->save.gs = from_vmcb->save.gs;
        to_vmcb->save.tr = from_vmcb->save.tr;
        to_vmcb->save.ldtr = from_vmcb->save.ldtr;
        to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
        to_vmcb->save.star = from_vmcb->save.star;
        to_vmcb->save.lstar = from_vmcb->save.lstar;
        to_vmcb->save.cstar = from_vmcb->save.cstar;
        to_vmcb->save.sfmask = from_vmcb->save.sfmask;
        to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
        to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
        to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}

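/*
 * Emulate #VMEXIT: copy the current L2 state back into vmcb12, then
 * restore L1's state from hsave and switch back to L1's MMU and CR3.
 */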
int nested_svm_vmexit(struct vcpu_svm *svm)
{
        int rc;
        struct vmcb *vmcb12;
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_host_map map;

        rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
        if (rc) {
                if (rc == -EINVAL)
                        kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        vmcb12 = map.hva;

        /* Exit Guest-Mode */
        leave_guest_mode(&svm->vcpu);
        svm->nested.vmcb12_gpa = 0;
        WARN_ON_ONCE(svm->nested.nested_run_pending);

        /* in case we halted in L2 */
        svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;

        /* Give the current vmcb to the guest */

        vmcb12->save.es     = vmcb->save.es;
        vmcb12->save.cs     = vmcb->save.cs;
        vmcb12->save.ss     = vmcb->save.ss;
        vmcb12->save.ds     = vmcb->save.ds;
        vmcb12->save.gdtr   = vmcb->save.gdtr;
        vmcb12->save.idtr   = vmcb->save.idtr;
        vmcb12->save.efer   = svm->vcpu.arch.efer;
        vmcb12->save.cr0    = kvm_read_cr0(&svm->vcpu);
        vmcb12->save.cr3    = kvm_read_cr3(&svm->vcpu);
        vmcb12->save.cr2    = vmcb->save.cr2;
        vmcb12->save.cr4    = svm->vcpu.arch.cr4;
        vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
        vmcb12->save.rip    = kvm_rip_read(&svm->vcpu);
        vmcb12->save.rsp    = kvm_rsp_read(&svm->vcpu);
        vmcb12->save.rax    = kvm_rax_read(&svm->vcpu);
        vmcb12->save.dr7    = vmcb->save.dr7;
        vmcb12->save.dr6    = svm->vcpu.arch.dr6;
        vmcb12->save.cpl    = vmcb->save.cpl;

        vmcb12->control.int_state         = vmcb->control.int_state;
        vmcb12->control.exit_code         = vmcb->control.exit_code;
        vmcb12->control.exit_code_hi      = vmcb->control.exit_code_hi;
        vmcb12->control.exit_info_1       = vmcb->control.exit_info_1;
        vmcb12->control.exit_info_2       = vmcb->control.exit_info_2;

        if (vmcb12->control.exit_code != SVM_EXIT_ERR)
                nested_vmcb_save_pending_event(svm, vmcb12);

        if (svm->nrips_enabled)
                vmcb12->control.next_rip  = vmcb->control.next_rip;

        vmcb12->control.int_ctl           = svm->nested.ctl.int_ctl;
        vmcb12->control.tlb_ctl           = svm->nested.ctl.tlb_ctl;
        vmcb12->control.event_inj         = svm->nested.ctl.event_inj;
        vmcb12->control.event_inj_err     = svm->nested.ctl.event_inj_err;

        vmcb12->control.pause_filter_count =
                svm->vmcb->control.pause_filter_count;
        vmcb12->control.pause_filter_thresh =
                svm->vmcb->control.pause_filter_thresh;

        /* Restore the original control entries */
        copy_vmcb_control_area(&vmcb->control, &hsave->control);

        /* On vmexit the GIF is set to false */
        svm_set_gif(svm, false);

        svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
                svm->vcpu.arch.l1_tsc_offset;

        svm->nested.ctl.nested_cr3 = 0;

        /* Restore selected save entries */
        svm->vmcb->save.es = hsave->save.es;
        svm->vmcb->save.cs = hsave->save.cs;
        svm->vmcb->save.ss = hsave->save.ss;
        svm->vmcb->save.ds = hsave->save.ds;
        svm->vmcb->save.gdtr = hsave->save.gdtr;
        svm->vmcb->save.idtr = hsave->save.idtr;
        kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
        svm_set_efer(&svm->vcpu, hsave->save.efer);
        svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
        svm_set_cr4(&svm->vcpu, hsave->save.cr4);
        kvm_rax_write(&svm->vcpu, hsave->save.rax);
        kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
        kvm_rip_write(&svm->vcpu, hsave->save.rip);
        svm->vmcb->save.dr7 = 0;
        svm->vmcb->save.cpl = 0;
        svm->vmcb->control.exit_int_info = 0;

        vmcb_mark_all_dirty(svm->vmcb);

        trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
                                       vmcb12->control.exit_info_1,
                                       vmcb12->control.exit_info_2,
                                       vmcb12->control.exit_int_info,
                                       vmcb12->control.exit_int_info_err,
                                       KVM_ISA_SVM);

        kvm_vcpu_unmap(&svm->vcpu, &map, true);

        nested_svm_uninit_mmu_context(&svm->vcpu);

        rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
        if (rc)
                return 1;

        if (npt_enabled)
                svm->vmcb->save.cr3 = hsave->save.cr3;

        /*
         * Drop what we picked up for L2 via svm_complete_interrupts() so it
         * doesn't end up in L1.
         */
        svm->vcpu.arch.nmi_injected = false;
        kvm_clear_exception_queue(&svm->vcpu);
        kvm_clear_interrupt_queue(&svm->vcpu);

        return 0;
}

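/* Allocate the hsave area and MSR permission bitmap needed for nesting. */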
int svm_allocate_nested(struct vcpu_svm *svm)
{
        struct page *hsave_page;

        if (svm->nested.initialized)
                return 0;

        hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!hsave_page)
                return -ENOMEM;
        svm->nested.hsave = page_address(hsave_page);

        svm->nested.msrpm = svm_vcpu_alloc_msrpm();
        if (!svm->nested.msrpm)
                goto err_free_hsave;
        svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);

        svm->nested.initialized = true;
        return 0;

err_free_hsave:
        __free_page(hsave_page);
        return -ENOMEM;
}

void svm_free_nested(struct vcpu_svm *svm)
{
        if (!svm->nested.initialized)
                return;

        svm_vcpu_free_msrpm(svm->nested.msrpm);
        svm->nested.msrpm = NULL;

        __free_page(virt_to_page(svm->nested.hsave));
        svm->nested.hsave = NULL;

        svm->nested.initialized = false;
}

/*
 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
 */
void svm_leave_nested(struct vcpu_svm *svm)
{
        if (is_guest_mode(&svm->vcpu)) {
                struct vmcb *hsave = svm->nested.hsave;
                struct vmcb *vmcb = svm->vmcb;

                svm->nested.nested_run_pending = 0;
                leave_guest_mode(&svm->vcpu);
                copy_vmcb_control_area(&vmcb->control, &hsave->control);
                nested_svm_uninit_mmu_context(&svm->vcpu);
        }

        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
}

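/*
 * Check L1's MSR permission bitmap to decide whether an MSR access that
 * exited from L2 must be reflected to L1.
 */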
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
        u32 offset, msr, value;
        int write, mask;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
                return NESTED_EXIT_HOST;

        msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
        offset = svm_msrpm_offset(msr);
        write  = svm->vmcb->control.exit_info_1 & 1;
        mask   = 1 << ((2 * (msr & 0xf)) + write);

        if (offset == MSR_INVALID)
                return NESTED_EXIT_DONE;

        /* Offset is in 32 bit units but we need it in 8 bit units */
        offset *= 4;

        if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
                return NESTED_EXIT_DONE;

        return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

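/*
 * Check L1's IO permission bitmap: the IO exit is reflected to L1 only if
 * one of the accessed port's bits is set.
 */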
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
        unsigned port, size, iopm_len;
        u16 val, mask;
        u8 start_bit;
        u64 gpa;

        if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;

        port = svm->vmcb->control.exit_info_1 >> 16;
        size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
                SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.ctl.iopm_base_pa + (port / 8);
        start_bit = port % 8;
        iopm_len = (start_bit + size > 8) ? 2 : 1;
        mask = (0xf >> (4 - size)) << start_bit;
        val = 0;

        if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
                return NESTED_EXIT_DONE;

        return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}

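/*
 * Decide whether an exit taken while running L2 is handled by L0
 * (NESTED_EXIT_HOST) or must be reflected to L1 (NESTED_EXIT_DONE).
 */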
static int nested_svm_intercept(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;
        int vmexit = NESTED_EXIT_HOST;

        switch (exit_code) {
        case SVM_EXIT_MSR:
                vmexit = nested_svm_exit_handled_msr(svm);
                break;
        case SVM_EXIT_IOIO:
                vmexit = nested_svm_intercept_ioio(svm);
                break;
        case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                /*
                 * Host-intercepted exceptions have been checked already in
                 * nested_svm_exit_special.  There is nothing to do here,
                 * the vmexit is injected by svm_check_nested_events.
                 */
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        case SVM_EXIT_ERR: {
                vmexit = NESTED_EXIT_DONE;
                break;
        }
        default: {
                if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
                        vmexit = NESTED_EXIT_DONE;
        }
        }

        return vmexit;
}

int nested_svm_exit_handled(struct vcpu_svm *svm)
{
        int vmexit;

        vmexit = nested_svm_intercept(svm);

        if (vmexit == NESTED_EXIT_DONE)
                nested_svm_vmexit(svm);

        return vmexit;
}

int nested_svm_check_permissions(struct vcpu_svm *svm)
{
        if (!(svm->vcpu.arch.efer & EFER_SVME) ||
            !is_paging(&svm->vcpu)) {
                kvm_queue_exception(&svm->vcpu, UD_VECTOR);
                return 1;
        }

        if (svm->vmcb->save.cpl) {
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        }

        return 0;
}

static bool nested_exit_on_exception(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
}

static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
{
        unsigned int nr = svm->vcpu.arch.exception.nr;

        svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
        svm->vmcb->control.exit_code_hi = 0;

        if (svm->vcpu.arch.exception.has_error_code)
                svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;

        /*
         * EXITINFO2 is undefined for all exception intercepts other
         * than #PF.
         */
        if (nr == PF_VECTOR) {
                if (svm->vcpu.arch.exception.nested_apf)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
                else if (svm->vcpu.arch.exception.has_payload)
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
                else
                        svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
        } else if (nr == DB_VECTOR) {
                /* See inject_pending_event.  */
                kvm_deliver_exception_payload(&svm->vcpu);
                if (svm->vcpu.arch.dr7 & DR7_GD) {
                        svm->vcpu.arch.dr7 &= ~DR7_GD;
                        kvm_update_dr7(&svm->vcpu);
                }
        } else
                WARN_ON(svm->vcpu.arch.exception.has_payload);

        nested_svm_vmexit(svm);
}

static void nested_svm_smi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_SMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_nmi(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code = SVM_EXIT_NMI;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static void nested_svm_intr(struct vcpu_svm *svm)
{
        trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);

        svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
        return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}

static void nested_svm_init(struct vcpu_svm *svm)
{
        svm->vmcb->control.exit_code   = SVM_EXIT_INIT;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);
}

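/*
 * Turn pending events (INIT, exceptions, SMI, NMI, interrupts) that L1
 * intercepts into nested vmexits.  Returns -EBUSY while event injection
 * has to be blocked, e.g. for a pending nested VMRUN.
 */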
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
                kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
        struct kvm_lapic *apic = vcpu->arch.apic;

        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_init(svm))
                        return 0;
                nested_svm_init(svm);
                return 0;
        }

        if (vcpu->arch.exception.pending) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_exception(svm))
                        return 0;
                nested_svm_inject_exception_vmexit(svm);
                return 0;
        }

        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_smi(svm))
                        return 0;
                nested_svm_smi(svm);
                return 0;
        }

        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_nmi(svm))
                        return 0;
                nested_svm_nmi(svm);
                return 0;
        }

        if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
                if (!nested_exit_on_intr(svm))
                        return 0;
                nested_svm_intr(svm);
                return 0;
        }

        return 0;
}

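/*
 * Exits that L0 must see first, regardless of L1's intercepts: INTR,
 * NMI, NPF and host-intercepted exceptions.
 */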
int nested_svm_exit_special(struct vcpu_svm *svm)
{
        u32 exit_code = svm->vmcb->control.exit_code;

        switch (exit_code) {
        case SVM_EXIT_INTR:
        case SVM_EXIT_NMI:
        case SVM_EXIT_NPF:
                return NESTED_EXIT_HOST;
        case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
                u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);

                if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
                                excp_bits)
                        return NESTED_EXIT_HOST;
                else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
                         svm->vcpu.arch.apf.host_apf_flags)
                        /* Trap async PF even if not shadowing */
                        return NESTED_EXIT_HOST;
                break;
        }
        default:
                break;
        }

        return NESTED_EXIT_CONTINUE;
}

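/*
 * KVM_GET_NESTED_STATE: copy the nested state header, vmcb12's control
 * area and the saved L1 state out to userspace.
 */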
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                u32 user_data_size)
{
        struct vcpu_svm *svm;
        struct kvm_nested_state kvm_state = {
                .flags = 0,
                .format = KVM_STATE_NESTED_FORMAT_SVM,
                .size = sizeof(kvm_state),
        };
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];

        if (!vcpu)
                return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;

        svm = to_svm(vcpu);

        if (user_data_size < kvm_state.size)
                goto out;

        /* First fill in the header and copy it out.  */
        if (is_guest_mode(vcpu)) {
                kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
                kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
                kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;

                if (svm->nested.nested_run_pending)
                        kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
        }

        if (gif_set(svm))
                kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;

        if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
                return -EFAULT;

        if (!is_guest_mode(vcpu))
                goto out;

        /*
         * Copy over the full size of the VMCB rather than just the size
         * of the structs.
         */
        if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
                         sizeof(user_vmcb->control)))
                return -EFAULT;
        if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
                         sizeof(user_vmcb->save)))
                return -EFAULT;

out:
        return kvm_state.size;
}

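/*
 * KVM_SET_NESTED_STATE: validate and load nested state from userspace;
 * the MSR permission bitmap is merged later via
 * KVM_REQ_GET_NESTED_STATE_PAGES.
 */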
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *hsave = svm->nested.hsave;
        struct vmcb __user *user_vmcb = (struct vmcb __user *)
                &user_kvm_nested_state->data.svm[0];
        struct vmcb_control_area *ctl;
        struct vmcb_save_area *save;
        int ret;
        u32 cr0;

        BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
                     KVM_STATE_NESTED_SVM_VMCB_SIZE);

        if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
                return -EINVAL;

        if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
                                 KVM_STATE_NESTED_RUN_PENDING |
                                 KVM_STATE_NESTED_GIF_SET))
                return -EINVAL;

        /*
         * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
         * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
         */
        if (!(vcpu->arch.efer & EFER_SVME)) {
                /* GIF=1 and no guest mode are required if SVME=0.  */
                if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
                        return -EINVAL;
        }

        /* SMM temporarily disables SVM, so we cannot be in guest mode.  */
        if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
                return -EINVAL;

        if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
                svm_leave_nested(svm);
                svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
                return 0;
        }

        if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
                return -EINVAL;
        if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
                return -EINVAL;

        ret  = -ENOMEM;
        ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
        save = kzalloc(sizeof(*save), GFP_KERNEL);
        if (!ctl || !save)
                goto out_free;

        ret = -EFAULT;
        if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
                goto out_free;
        if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
                goto out_free;

        ret = -EINVAL;
        if (!nested_vmcb_check_controls(ctl))
                goto out_free;

        /*
         * Processor state contains L2 state.  Check that it is
         * valid for guest mode (see nested_vmcb_check_save).
         */
        cr0 = kvm_read_cr0(vcpu);
        if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
                goto out_free;

        /*
         * Validate host state saved from before VMRUN (see
         * nested_svm_check_permissions).
         * TODO: validate reserved bits for all saved state.
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
        if (!(save->efer & EFER_SVME))
                goto out_free;

        /*
         * All checks done, we can enter guest mode.  L1 control fields
         * come from the nested save state.  Guest state is already
         * in the registers, the save area of the nested state instead
         * contains saved L1 state.
         */
        copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
        hsave->save = *save;

        svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
        load_nested_vmcb_control(svm, ctl);
        nested_prepare_vmcb_control(svm);

        kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        ret = 0;
out_free:
        kfree(save);
        kfree(ctl);

        return ret;
}

struct kvm_x86_nested_ops svm_nested_ops = {
        .check_events = svm_check_nested_events,
        .get_nested_state_pages = svm_get_nested_state_pages,
        .get_state = svm_get_nested_state,
        .set_state = svm_set_nested_state,
};