// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 */

#ifndef __SVM_SVM_H
#define __SVM_SVM_H
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bits.h>

#include <asm/svm.h>
#include <asm/sev-common.h>

#include "kvm_cache_regs.h"
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
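/*
 * Editor's note on the sizes below (reasoning per the AMD APM, not from the
 * original file): the IOPM covers 64K I/O ports at one bit per port (8 KiB)
 * plus padding for multi-byte accesses at the top of the port range, hence
 * three 4 KiB pages; the MSRPM encodes read and write intercept bits (two
 * bits per MSR) for four 8K-MSR ranges, i.e. 2 KiB per range and two 4 KiB
 * pages total.
 */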
#define IOPM_SIZE (PAGE_SIZE * 3)
#define MSRPM_SIZE (PAGE_SIZE * 2)
#define MAX_DIRECT_ACCESS_MSRS 46
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
extern bool vnmi;
/*
 * Clean bits in VMCB.
 * VMCB_ALL_CLEAN_MASK might also need to be updated if this enum is
 * modified.
 */
enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,	 /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,	 /* DR6, DR7 */
	VMCB_DT,	 /* GDT, IDT */
	VMCB_SEG,	 /* CS, DS, SS, ES, CPL */
	VMCB_CR2,	 /* CR2 only */
	VMCB_LBR,	 /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,	 /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_SW = 31,	 /* Reserved for hypervisor/software use */
};
#define VMCB_ALL_CLEAN_MASK (					\
	(1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) |	\
	(1U << VMCB_ASID) | (1U << VMCB_INTR) |			\
	(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) |	\
	(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) |	\
	(1U << VMCB_LBR) | (1U << VMCB_AVIC) |			\
	(1U << VMCB_SW))
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
struct kvm_sev_info {
	bool active;		/* SEV enabled guest */
	bool es_active;		/* SEV-ES enabled guest */
	unsigned int asid;	/* ASID used for this guest */
	unsigned int handle;	/* SEV firmware handle */
	int fd;			/* SEV device fd */
	unsigned long pages_locked; /* Number of pages locked */
	struct list_head regions_list;  /* List of registered regions */
	u64 ap_jump_table;	/* SEV-ES AP Jump Table address */
	struct kvm *enc_context_owner; /* Owner of copied encryption context */
	struct list_head mirror_vms; /* List of VMs mirroring */
	struct list_head mirror_entry; /* Use as a list entry of mirrors */
	struct misc_cg *misc_cg; /* For misc cgroup accounting */
	atomic_t migration_in_progress;
};
struct kvm_svm {
	struct kvm kvm;

	/* Struct members for AVIC */
	u32 avic_vm_id;
	struct page *avic_logical_id_table_page;
	struct page *avic_physical_id_table_page;
	struct hlist_node hnode;

	struct kvm_sev_info sev_info;
};
struct kvm_vmcb_info {
	struct vmcb *ptr;
	unsigned long pa;
	int cpu;
	uint64_t asid_generation;
};
struct vmcb_save_area_cached {
	u64 efer;
	u64 cr4;
	u64 cr3;
	u64 cr0;
	u64 dr7;
	u64 dr6;
};
struct vmcb_ctrl_area_cached {
	u32 intercepts[MAX_INTERCEPT];
	u16 pause_filter_thresh;
	u16 pause_filter_count;
	u64 iopm_base_pa;
	u64 msrpm_base_pa;
	u64 tsc_offset;
	u32 asid;
	u8 tlb_ctl;
	u32 int_ctl;
	u32 int_vector;
	u32 int_state;
	u32 exit_code;
	u32 exit_code_hi;
	u64 exit_info_1;
	u64 exit_info_2;
	u32 exit_int_info;
	u32 exit_int_info_err;
	u64 nested_ctl;
	u32 event_inj;
	u32 event_inj_err;
	u64 next_rip;
	u64 nested_cr3;
	u64 virt_ext;
	u32 clean;
	union {
		struct hv_vmcb_enlightenments hv_enlightenments;
		u8 reserved_sw[32];
	};
};
struct svm_nested_state {
	struct kvm_vmcb_info vmcb02;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb12_gpa;
	u64 last_vmcb12_gpa;

	/* These are the merged vectors */
	u32 *msrpm;
	/* A VMRUN has started but has not yet been performed, so
	 * we cannot inject a nested vmexit yet. */
	bool nested_run_pending;
	/* cache for control fields of the guest */
	struct vmcb_ctrl_area_cached ctl;
	/*
	 * Note: this struct is not kept up-to-date while L2 runs; it is only
	 * valid within nested_svm_vmrun.
	 */
	struct vmcb_save_area_cached save;

	bool initialized;
	/*
	 * Indicates whether MSR bitmap for L2 needs to be rebuilt due to
	 * changes in MSR bitmap for L1 or switching to a different L2. Note,
	 * this flag can only be used reliably in conjunction with a paravirt L1
	 * which informs L0 whether any changes to MSR bitmap for L2 were done
	 * on its side.
	 */
	bool force_msr_bitmap_recalc;
};
struct vcpu_sev_es_state {
	/* SEV-ES support */
	struct sev_es_save_area *vmsa;
	struct ghcb *ghcb;
	u8 valid_bitmap[16];
	struct kvm_host_map ghcb_map;
	bool received_first_sipi;

	/* SEV-ES scratch area support */
	u64 sw_scratch;
	void *ghcb_sa;
	u32 ghcb_sa_len;
	bool ghcb_sa_sync;
	bool ghcb_sa_free;
};

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	/* vmcb always points at current_vmcb->ptr, it's purely a shorthand. */
	struct vmcb *vmcb;
	struct kvm_vmcb_info vmcb01;
	struct kvm_vmcb_info *current_vmcb;
	/*
	 * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
	 * translated into the appropriate L2_CFG bits on the host to
	 * perform speculative control.
	 */
	u64 virt_spec_ctrl;
	struct svm_nested_state nested;
	/* NMI mask value, used when vNMI is not enabled */
	bool nmi_masked;
	/*
	 * True when NMIs are still masked but guest IRET was just intercepted
	 * and KVM is waiting for RIP to change, which will signal that the
	 * intercepted IRET was retired and thus NMI can be unmasked.
	 */
	bool awaiting_iret_completion;
	/*
	 * Set when KVM is awaiting IRET completion and needs to inject NMIs as
	 * soon as the IRET completes (e.g. NMI is pending injection). KVM
	 * temporarily steals RFLAGS.TF to single-step the guest in this case
	 * in order to regain control as soon as the NMI-blocking condition
	 * goes away.
	 */
	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;
	unsigned long soft_int_csbase;
	unsigned long soft_int_old_rip;
	unsigned long soft_int_next_rip;
	bool soft_int_injected;
	/* optional nested SVM features that are enabled for this guest */
	bool nrips_enabled : 1;
	bool tsc_scaling_enabled : 1;
	bool v_vmload_vmsave_enabled : 1;
	bool lbrv_enabled : 1;
	bool pause_filter_enabled : 1;
	bool pause_threshold_enabled : 1;
	bool vgif_enabled : 1;
	bool vnmi_enabled : 1;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	/*
	 * Per-vCPU list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating the vCPU affinity. This avoids the need to scan for
	 * IRTEs and try to match ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;
	/* Save desired MSR intercept (read: pass-through) state */
	struct {
		DECLARE_BITMAP(read, MAX_DIRECT_ACCESS_MSRS);
		DECLARE_BITMAP(write, MAX_DIRECT_ACCESS_MSRS);
	} shadow_msr_intercept;
	struct vcpu_sev_es_state sev_es;

	bool guest_state_loaded;

	bool x2avic_msrs_intercepted;
	/* Guest GIF value, used when vGIF is not enabled */
	bool guest_gif;
};
struct svm_cpu_data {
	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;

	struct page *save_area;
	unsigned long save_area_pa;

	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};
DECLARE_PER_CPU(struct svm_cpu_data, svm_data);

void recalc_intercepts(struct vcpu_svm *svm);
static __always_inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_svm, kvm);
}
static __always_inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->active;
#else
	return false;
#endif
}
static __always_inline bool sev_es_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->es_active && !WARN_ON_ONCE(!sev->active);
#else
	return false;
#endif
}
static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}
static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = VMCB_ALL_CLEAN_MASK
			      & ~VMCB_ALWAYS_DIRTY_MASK;
}
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}
static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
	return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
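/*
 * Illustrative sketch (editor's example, not part of the original header):
 * after software changes a field that hardware may cache across VMRUN, the
 * corresponding clean bit must be cleared so the CPU reloads that VMCB
 * area, e.g.:
 *
 *	vmcb->save.g_pat = pat;
 *	vmcb_mark_dirty(vmcb, VMCB_NPT);
 *
 * Fields under VMCB_ALWAYS_DIRTY_MASK (int_ctl, CR2) never need explicit
 * marking because vmcb_mark_all_clean() leaves their bits dirty.
 */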
static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}
/*
 * Only the PDPTRs are loaded on demand into the shadow MMU.  All other
 * fields are synchronized on VM-Exit, because accessing the VMCB is cheap.
 *
 * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
 * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
 * is changed.  svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
 */
#define SVM_REGS_LAZY_LOAD_SET	(1 << VCPU_EXREG_PDPTR)
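/*
 * Illustrative flow (editor's sketch, not from the original header): when
 * vcpu->arch.cr3 changes, e.g. in kvm_set_cr3(), KVM_REQ_LOAD_MMU_PGD is
 * raised; servicing it reaches svm_load_mmu_pgd(), which writes the new
 * value into vmcb->save.cr3 and calls vmcb_mark_dirty(vmcb, VMCB_CR).
 */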
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__set_bit(bit, (unsigned long *)&control->intercepts);
}
static inline void vmcb_clr_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	__clear_bit(bit, (unsigned long *)&control->intercepts);
}
static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
{
	WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
	return test_bit(bit, (unsigned long *)&control->intercepts);
}
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	if (!sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
	}

	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb->control.intercepts[INTERCEPT_DR] = 0;
	/* DR7 access must remain intercepted for an SEV-ES guest */
	if (sev_es_guest(svm->vcpu.kvm)) {
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
		vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
	}

	recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	WARN_ON_ONCE(bit >= 32);
	vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);

	recalc_intercepts(svm);
}
static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_set_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = svm->vmcb01.ptr;

	vmcb_clr_intercept(&vmcb->control, bit);

	recalc_intercepts(svm);
}
static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
{
	return vmcb_is_intercept(&svm->vmcb->control, bit);
}
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
	return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
{
	if (!vgif)
		return NULL;

	if (is_guest_mode(&svm->vcpu) && !nested_vgif_enabled(svm))
		return svm->nested.vmcb02.ptr;
	else
		return svm->vmcb01.ptr;
}
static inline void enable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->guest_gif = true;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->guest_gif = false;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vgif_vmcb(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_GIF_MASK);
	else
		return svm->guest_gif;
}
static inline bool nested_npt_enabled(struct vcpu_svm *svm)
{
	return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
	return svm->vnmi_enabled &&
	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}
static inline bool is_x2apic_msrpm_offset(u32 offset)
{
	/* 4 msrs per u8, and 4 u8 in u32 */
	u32 msr = offset * 16;

	return (msr >= APIC_BASE_MSR) &&
	       (msr < (APIC_BASE_MSR + 0x100));
}
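/*
 * Worked example (editor's illustration): one MSRPM byte carries read and
 * write bits for 4 MSRs, and a u32 holds 4 bytes, so each u32 offset covers
 * 16 MSRs.  With APIC_BASE_MSR == 0x800, offset 0x80 maps to MSR
 * 0x80 * 16 = 0x800, the first x2APIC MSR, and offsets 0x80-0x8f span the
 * whole 0x800-0x8ff x2APIC range.
 */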
static inline struct vmcb *get_vnmi_vmcb_l1(struct vcpu_svm *svm)
{
	if (!vnmi)
		return NULL;

	if (is_guest_mode(&svm->vcpu))
		return NULL;
	else
		return svm->vmcb01.ptr;
}
static inline bool is_vnmi_enabled(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_vnmi_vmcb_l1(svm);

	if (vmcb)
		return !!(vmcb->control.int_ctl & V_NMI_ENABLE_MASK);
	else
		return false;
}
/* svm.c */
#define MSR_INVALID			0xffffffffU

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
u32 *svm_vcpu_alloc_msrpm(void);
void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm);
void svm_vcpu_free_msrpm(u32 *msrpm);
void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
void svm_update_lbrv(struct kvm_vcpu *vcpu);

int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void disable_nmi_singlestep(struct vcpu_svm *svm);
bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
			  int read, int write);
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
				     int trig_mode, int vec);
/* nested.c */

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */
static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return is_guest_mode(vcpu) && (svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK);
}
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}
static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}
static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
	return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
			 u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
			  struct vmcb_save_area *from_save);
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
{
	svm->vmcb->control.exit_code   = exit_code;
	svm->vmcb->control.exit_info_1 = 0;
	svm->vmcb->control.exit_info_2 = 0;
	return nested_svm_vmexit(svm);
}
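/*
 * Usage sketch (editor's example): exits that carry no exit_info payload
 * are reflected into L1 this way, e.g. the nested event-check path can use
 * nested_svm_simple_vmexit(svm, SVM_EXIT_INTR) when a pending interrupt
 * must be handled by the L1 hypervisor.
 */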
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
			       bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void __svm_write_tsc_multiplier(u64 multiplier);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
				       struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
				    struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);

extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */

#define AVIC_REQUIRED_APICV_INHIBITS			\
(							\
	BIT(APICV_INHIBIT_REASON_DISABLE) |		\
	BIT(APICV_INHIBIT_REASON_ABSENT) |		\
	BIT(APICV_INHIBIT_REASON_HYPERV) |		\
	BIT(APICV_INHIBIT_REASON_NESTED) |		\
	BIT(APICV_INHIBIT_REASON_IRQWIN) |		\
	BIT(APICV_INHIBIT_REASON_PIT_REINJ) |		\
	BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |		\
	BIT(APICV_INHIBIT_REASON_SEV) |			\
	BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_ID_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_APIC_BASE_MODIFIED) |	\
	BIT(APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED)	\
)
bool avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb);
int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
void avic_apicv_post_state_restore(struct kvm_vcpu *vcpu);
void avic_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu);
int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
			uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
void avic_ring_doorbell(struct kvm_vcpu *vcpu);
unsigned long avic_vcpu_get_apicv_inhibit_reasons(struct kvm_vcpu *vcpu);
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL

extern unsigned int max_sev_asid;
void sev_vm_destroy(struct kvm *kvm);
int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp);
int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range);
int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range);
int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
void sev_guest_memory_reclaimed(struct kvm *kvm);

void pre_sev_run(struct vcpu_svm *svm, int cpu);
void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
void sev_es_unmap_ghcb(struct vcpu_svm *svm);
/* vmenter.S */

void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
#define DEFINE_KVM_GHCB_ACCESSORS(field)						\
	static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
	{									\
		return test_bit(GHCB_BITMAP_IDX(field),				\
				(unsigned long *)&svm->sev_es.valid_bitmap);	\
	}									\
										\
	static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
	{									\
		return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0;	\
	}
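/*
 * Editor's sketch of the generated API (GHCB_BITMAP_IDX() is assumed to
 * come from <asm/svm.h>): DEFINE_KVM_GHCB_ACCESSORS(rax) expands to
 * kvm_ghcb_rax_is_valid() and kvm_ghcb_get_rax_if_valid(), letting VMGEXIT
 * handlers read a guest register only if the guest marked it valid in the
 * GHCB's valid bitmap.
 */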
DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
DEFINE_KVM_GHCB_ACCESSORS(rcx)
DEFINE_KVM_GHCB_ACCESSORS(rdx)
DEFINE_KVM_GHCB_ACCESSORS(rbx)
DEFINE_KVM_GHCB_ACCESSORS(rsi)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_code)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)

#endif /* __SVM_SVM_H */