KVM: x86: Refactor __kvm_emulate_hypercall() into a macro
author Paolo Bonzini <pbonzini@redhat.com>
Tue, 10 Dec 2024 16:21:03 +0000 (11:21 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
Sun, 22 Dec 2024 18:00:25 +0000 (13:00 -0500)
Rework __kvm_emulate_hypercall() into a macro so that completion of
hypercalls that don't exit to userspace use direct function calls to the
completion helper, i.e. don't trigger a retpoline when RETPOLINE=y.

Opportunistically take the names of the input registers, as opposed to
taking the input values, to preemptively dedup more of the calling code
(TDX needs to use different registers).  Use the direct GPR accessors to
read values to avoid the pointless marking of the registers as available
(KVM requires GPRs to always be available).

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Message-ID: <20241128004344.4072099-7-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index 53ef7fa7094b849eea4baeafe9b0fa2f36ba967c..ad3fc35703a8de175c73f14f28965d958dfbcd49 100644 (file)
@@ -9982,11 +9982,11 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
        return kvm_skip_emulated_instruction(vcpu);
 }
 
-int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-                           unsigned long a0, unsigned long a1,
-                           unsigned long a2, unsigned long a3,
-                           int op_64_bit, int cpl,
-                           int (*complete_hypercall)(struct kvm_vcpu *))
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+                             unsigned long a0, unsigned long a1,
+                             unsigned long a2, unsigned long a3,
+                             int op_64_bit, int cpl,
+                             int (*complete_hypercall)(struct kvm_vcpu *))
 {
        unsigned long ret;
 
@@ -10079,31 +10079,21 @@ int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
 
 out:
        vcpu->run->hypercall.ret = ret;
-       return complete_hypercall(vcpu);
+       return 1;
 }
-EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
+EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
-       unsigned long nr, a0, a1, a2, a3;
-       int op_64_bit;
-       int cpl;
-
        if (kvm_xen_hypercall_enabled(vcpu->kvm))
                return kvm_xen_hypercall(vcpu);
 
        if (kvm_hv_hypercall_enabled(vcpu))
                return kvm_hv_hypercall(vcpu);
 
-       nr = kvm_rax_read(vcpu);
-       a0 = kvm_rbx_read(vcpu);
-       a1 = kvm_rcx_read(vcpu);
-       a2 = kvm_rdx_read(vcpu);
-       a3 = kvm_rsi_read(vcpu);
-       op_64_bit = is_64_bit_hypercall(vcpu);
-       cpl = kvm_x86_call(get_cpl)(vcpu);
-
-       return __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl,
+       return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
+                                      is_64_bit_hypercall(vcpu),
+                                      kvm_x86_call(get_cpl)(vcpu),
                                       complete_hypercall_exit);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
index 28adc8ea04bfbab0911a85810b78e431e699a32d..b00ecbfef0005d69cca4e70150ffc7c5825f108d 100644 (file)
@@ -617,11 +617,26 @@ static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
        return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
 }
 
-int __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
-                           unsigned long a0, unsigned long a1,
-                           unsigned long a2, unsigned long a3,
-                           int op_64_bit, int cpl,
-                           int (*complete_hypercall)(struct kvm_vcpu *));
+int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
+                             unsigned long a0, unsigned long a1,
+                             unsigned long a2, unsigned long a3,
+                             int op_64_bit, int cpl,
+                             int (*complete_hypercall)(struct kvm_vcpu *));
+
+#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall) \
+({                                                                                             \
+       int __ret;                                                                              \
+                                                                                               \
+       __ret = ____kvm_emulate_hypercall(_vcpu,                                                \
+                                         kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu),       \
+                                         kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu),       \
+                                         kvm_##a3##_read(_vcpu), op_64_bit, cpl,               \
+                                         complete_hypercall);                                  \
+                                                                                               \
+       if (__ret > 0)                                                                          \
+               __ret = complete_hypercall(_vcpu);                                              \
+       __ret;                                                                                  \
+})
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);