KVM: VMX: Stub out enable_evmcs static key for CONFIG_HYPERV=n
author Sean Christopherson <seanjc@google.com>
Sat, 11 Feb 2023 00:35:33 +0000 (00:35 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 14 Mar 2023 14:28:57 +0000 (10:28 -0400)
Wrap enable_evmcs in a helper and stub it out when CONFIG_HYPERV=n in
order to eliminate the static branch nop placeholders.  clang-14 is clever
enough to elide the nop, but gcc-12 is not.  Stubbing out the key reduces
the size of kvm-intel.ko by ~7.5% (200KiB) when compiled with gcc-12
(there are a _lot_ of VMCS accesses throughout KVM).
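
For illustration only (not part of the patch): a minimal userspace
analogue of the stub pattern. The kernel uses a jump label
(DEFINE_STATIC_KEY_FALSE / static_branch_unlikely) rather than a plain
bool, and the hypothetical vmcs_read_demo() caller below stands in for
the real vmcs_*() accessors, but the mechanism is the same: when the
feature is configured out, the helper returns a compile-time constant
false and the compiler discards the eVMCS branch entirely instead of
emitting a nop placeholder.

  /* Build with -DCONFIG_HYPERV to take the "enabled" path. */
  #include <stdbool.h>
  #include <stdio.h>

  #ifdef CONFIG_HYPERV
  static bool enable_evmcs;	/* stand-in for the static key */

  static inline bool kvm_is_using_evmcs(void)
  {
          return enable_evmcs;
  }
  #else
  /* Stub: constant false, so callers constant-fold the branch away. */
  static inline bool kvm_is_using_evmcs(void)
  {
          return false;
  }
  #endif

  static unsigned long vmcs_read_demo(void)
  {
          if (kvm_is_using_evmcs())
                  return 1;	/* eVMCS path: dead code when stubbed */
          return 2;		/* plain VMREAD path */
  }

  int main(void)
  {
          printf("%lu\n", vmcs_read_demo());
          return 0;
  }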

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230211003534.564198-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/hyperv.c
arch/x86/kvm/vmx/hyperv.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx_ops.h

diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c
index b6748055c586e292c618ad29d6e5a656a4398200..274fbd38c64e2828ed9aecb1422883a7c9f7d3b3 100644
--- a/arch/x86/kvm/vmx/hyperv.c
+++ b/arch/x86/kvm/vmx/hyperv.c
 
 #define EVMCS1_SUPPORTED_VMFUNC (0)
 
-DEFINE_STATIC_KEY_FALSE(enable_evmcs);
-
 #define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
 #define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
                {EVMCS1_OFFSET(name), clean_field}
@@ -611,6 +609,8 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
 }
 
 #if IS_ENABLED(CONFIG_HYPERV)
+DEFINE_STATIC_KEY_FALSE(enable_evmcs);
+
 /*
  * KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
  * is: in case a feature has corresponding fields in eVMCS described and it was
diff --git a/arch/x86/kvm/vmx/hyperv.h b/arch/x86/kvm/vmx/hyperv.h
index 1299143d00df7a074e5e16a64c1622e13f1bc8c1..a54a2fdf0a5b7c2d08a62e19ca83e7104d48d637 100644
--- a/arch/x86/kvm/vmx/hyperv.h
+++ b/arch/x86/kvm/vmx/hyperv.h
@@ -16,8 +16,6 @@
 
 struct vmcs_config;
 
-DECLARE_STATIC_KEY_FALSE(enable_evmcs);
-
 #define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
 
 #define KVM_EVMCS_VERSION 1
@@ -69,6 +67,13 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
 
 #if IS_ENABLED(CONFIG_HYPERV)
 
+DECLARE_STATIC_KEY_FALSE(enable_evmcs);
+
+static __always_inline bool kvm_is_using_evmcs(void)
+{
+       return static_branch_unlikely(&enable_evmcs);
+}
+
 static __always_inline int get_evmcs_offset(unsigned long field,
                                            u16 *clean_field)
 {
@@ -158,6 +163,7 @@ static inline void evmcs_load(u64 phys_addr)
 
 void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
+static __always_inline bool kvm_is_using_evmcs(void) { return false; }
 static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
 static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
 static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d2d6e1b6c7882779c657adc062c83ae049445bc9..09e07fb83c37f0c9e5c1e0ed360f36bdb0fa536e 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -595,7 +595,7 @@ static void hv_reset_evmcs(void)
 {
        struct hv_vp_assist_page *vp_ap;
 
-       if (!static_branch_unlikely(&enable_evmcs))
+       if (!kvm_is_using_evmcs())
                return;
 
        /*
@@ -2816,8 +2816,7 @@ static int vmx_hardware_enable(void)
         * This can happen if we hot-added a CPU but failed to allocate
         * VP assist page for it.
         */
-       if (static_branch_unlikely(&enable_evmcs) &&
-           !hv_get_vp_assist_page(cpu))
+       if (kvm_is_using_evmcs() && !hv_get_vp_assist_page(cpu))
                return -EFAULT;
 
        intel_pt_handle_vmx(1);
@@ -2869,7 +2868,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
        memset(vmcs, 0, vmcs_config.size);
 
        /* KVM supports Enlightened VMCS v1 only */
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
        else
                vmcs->hdr.revision_id = vmcs_config.revision_id;
@@ -2964,7 +2963,7 @@ static __init int alloc_kvm_area(void)
                 * still be marked with revision_id reported by
                 * physical CPU.
                 */
-               if (static_branch_unlikely(&enable_evmcs))
+               if (kvm_is_using_evmcs())
                        vmcs->hdr.revision_id = vmcs_config.revision_id;
 
                per_cpu(vmxarea, cpu) = vmcs;
@@ -3931,7 +3930,7 @@ static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
         * 'Enlightened MSR Bitmap' feature L0 needs to know that MSR
         * bitmap has changed.
         */
-       if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs)) {
+       if (kvm_is_using_evmcs()) {
                struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
 
                if (evmcs->hv_enlightenments_control.msr_bitmap)
@@ -7310,7 +7309,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
 
        /* All fields are clean at this point */
-       if (static_branch_unlikely(&enable_evmcs)) {
+       if (kvm_is_using_evmcs()) {
                current_evmcs->hv_clean_fields |=
                        HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
 
@@ -7440,7 +7439,7 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
         * feature only for vmcs01, KVM currently isn't equipped to realize any
         * performance benefits from enabling it for vmcs02.
         */
-       if (IS_ENABLED(CONFIG_HYPERV) && static_branch_unlikely(&enable_evmcs) &&
+       if (kvm_is_using_evmcs() &&
            (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
                struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
 
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index db95bde52998fcf1d5073876289e535f1eef7f54..ce47dc265f89a298b0fd3f827eeec65c3c54b10f 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -147,7 +147,7 @@ do_exception:
 static __always_inline u16 vmcs_read16(unsigned long field)
 {
        vmcs_check16(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_read16(field);
        return __vmcs_readl(field);
 }
@@ -155,7 +155,7 @@ static __always_inline u16 vmcs_read16(unsigned long field)
 static __always_inline u32 vmcs_read32(unsigned long field)
 {
        vmcs_check32(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_read32(field);
        return __vmcs_readl(field);
 }
@@ -163,7 +163,7 @@ static __always_inline u32 vmcs_read32(unsigned long field)
 static __always_inline u64 vmcs_read64(unsigned long field)
 {
        vmcs_check64(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_read64(field);
 #ifdef CONFIG_X86_64
        return __vmcs_readl(field);
@@ -175,7 +175,7 @@ static __always_inline u64 vmcs_read64(unsigned long field)
 static __always_inline unsigned long vmcs_readl(unsigned long field)
 {
        vmcs_checkl(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_read64(field);
        return __vmcs_readl(field);
 }
@@ -222,7 +222,7 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
 static __always_inline void vmcs_write16(unsigned long field, u16 value)
 {
        vmcs_check16(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write16(field, value);
 
        __vmcs_writel(field, value);
@@ -231,7 +231,7 @@ static __always_inline void vmcs_write16(unsigned long field, u16 value)
 static __always_inline void vmcs_write32(unsigned long field, u32 value)
 {
        vmcs_check32(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write32(field, value);
 
        __vmcs_writel(field, value);
@@ -240,7 +240,7 @@ static __always_inline void vmcs_write32(unsigned long field, u32 value)
 static __always_inline void vmcs_write64(unsigned long field, u64 value)
 {
        vmcs_check64(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write64(field, value);
 
        __vmcs_writel(field, value);
@@ -252,7 +252,7 @@ static __always_inline void vmcs_write64(unsigned long field, u64 value)
 static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
 {
        vmcs_checkl(field);
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write64(field, value);
 
        __vmcs_writel(field, value);
@@ -262,7 +262,7 @@ static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
 {
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_clear_bits does not support 64-bit fields");
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write32(field, evmcs_read32(field) & ~mask);
 
        __vmcs_writel(field, __vmcs_readl(field) & ~mask);
@@ -272,7 +272,7 @@ static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
 {
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_set_bits does not support 64-bit fields");
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_write32(field, evmcs_read32(field) | mask);
 
        __vmcs_writel(field, __vmcs_readl(field) | mask);
@@ -289,7 +289,7 @@ static inline void vmcs_load(struct vmcs *vmcs)
 {
        u64 phys_addr = __pa(vmcs);
 
-       if (static_branch_unlikely(&enable_evmcs))
+       if (kvm_is_using_evmcs())
                return evmcs_load(phys_addr);
 
        vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);