KVM: VMX: Always inline to_vmx() and to_kvm_vmx()
author Sean Christopherson <seanjc@google.com>
Tue, 13 Dec 2022 06:09:09 +0000 (06:09 +0000)
committer Sean Christopherson <seanjc@google.com>
Tue, 24 Jan 2023 18:36:40 +0000 (10:36 -0800)
Tag to_vmx() and to_kvm_vmx() __always_inline as they both just return
the passed-in pointer (the embedded struct is the first field in the
container, so container_of() reduces to a no-op cast), and drop the
@vmx param from vmx_vcpu_enter_exit(), which likely existed purely to
make noinstr validation happy.
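
For illustration, a minimal sketch of the layout the first sentence
relies on (struct contents elided): the embedded struct sits at offset
0, so container_of() subtracts nothing and the "conversion" is a pure
pointer cast.

  struct vcpu_vmx {
          struct kvm_vcpu vcpu;   /* first field => offset 0 */
          /* ... VMX-specific state elided ... */
  };

  static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
  {
          /* offsetof(struct vcpu_vmx, vcpu) == 0, i.e. (void *)vcpu */
          return container_of(vcpu, struct vcpu_vmx, vcpu);
  }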

Amusingly, when the compiler decides not to inline the helpers, e.g. for
KASAN builds, to_vmx() and to_kvm_vmx() may end up pointing at the same
symbol, which generates very confusing objtool warnings.  E.g. the use of
to_vmx() in a future patch led to objtool complaining about to_kvm_vmx(),
and only once all use of to_kvm_vmx() was commented out did to_vmx() pop
up in the objtool report.

  vmlinux.o: warning: objtool: vmx_vcpu_enter_exit+0x160: call to to_kvm_vmx() leaves .noinstr.text section
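
A standalone sketch of how the mixup can arise (the folding behavior is
an assumption about the toolchain, e.g. GCC's identical code folding via
-fipa-icf, not something KVM relies on): out of line, both helpers lower
to "return the argument unchanged", so the compiler may emit one body
and alias the other name to it, and objtool then reports whichever
symbol the call happens to resolve to.

  /* Not inlined, e.g. under KASAN: both bodies are byte-identical. */
  struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
  {
          return container_of(vcpu, struct vcpu_vmx, vcpu); /* offset 0 */
  }

  struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
  {
          return container_of(kvm, struct kvm_vmx, kvm);    /* offset 0 */
  }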

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221213060912.654668-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h

index 506c324d46b1ba1754b1aa17f30ffc37b94bf78e..59a051350f8f5f0fdf0ad1a7b1f6ee38a079aa99 100644
@@ -7171,9 +7171,10 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 }
 
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
-                                       struct vcpu_vmx *vmx,
                                        unsigned int flags)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
        guest_state_enter_irqoff();
 
        /* L1D Flush includes CPU buffer clear to mitigate MDS */
@@ -7291,7 +7292,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        kvm_wait_lapic_expire(vcpu);
 
        /* The actual VMENTER/EXIT is in the .noinstr.text section. */
-       vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
+       vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
 
        /* All fields are clean at this point */
        if (static_branch_unlikely(&enable_evmcs)) {
index bb720a2f11abd9be23e2563cd87c5b39d545c966..2acdc54bc34b18bd71d3020b837617c7ad488dd9 100644
@@ -640,12 +640,12 @@ BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64)
                                (1 << VCPU_EXREG_EXIT_INFO_1) | \
                                (1 << VCPU_EXREG_EXIT_INFO_2))
 
-static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
+static __always_inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
 {
        return container_of(kvm, struct kvm_vmx, kvm);
 }
 
-static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 {
        return container_of(vcpu, struct vcpu_vmx, vcpu);
 }