KVM: SVM: Drop 32-bit "support" from __svm_sev_es_vcpu_run()
author     Sean Christopherson <seanjc@google.com>
           Fri, 23 Feb 2024 20:42:28 +0000 (12:42 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 9 Apr 2024 17:20:29 +0000 (10:20 -0700)
Drop 32-bit "support" from __svm_sev_es_vcpu_run(), as SEV/SEV-ES is firmly
64-bit only.  The "support" was purely the result of bad copy+paste from
__svm_vcpu_run(), which in turn was slightly less bad copy+paste from
__vmx_vcpu_run().

Opportunistically convert to unadulterated register accesses so that it's
easier (but still not easy) to follow which registers hold what arguments,
and when.

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240223204233.3337324-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/vmenter.S

index 6f57d496867a14201aba499a6e474db28322819c..c057866a459b6786c5813cd7d16e8fddfbe26b1e 100644
@@ -298,17 +298,12 @@ SYM_FUNC_END(__svm_vcpu_run)
  * @spec_ctrl_intercepted: bool
  */
 SYM_FUNC_START(__svm_sev_es_vcpu_run)
-       push %_ASM_BP
-#ifdef CONFIG_X86_64
+       push %rbp
        push %r15
        push %r14
        push %r13
        push %r12
-#else
-       push %edi
-       push %esi
-#endif
-       push %_ASM_BX
+       push %rbx
 
        /*
         * Save variables needed after vmexit on the stack, in inverse
@@ -316,39 +311,31 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
         */
 
        /* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
-       push %_ASM_ARG2
+       push %rsi
 
        /* Save @svm. */
-       push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
-       /*
-        * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
-        * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
-        */
-       mov %_ASM_ARG1, %_ASM_DI
-.endif
+       push %rdi
 
        /* Clobbers RAX, RCX, RDX.  */
        RESTORE_GUEST_SPEC_CTRL
 
        /* Get svm->current_vmcb->pa into RAX. */
-       mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
-       mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+       mov SVM_current_vmcb(%rdi), %rax
+       mov KVM_VMCB_pa(%rax), %rax
 
        /* Enter guest mode */
        sti
 
-1:     vmrun %_ASM_AX
+1:     vmrun %rax
 
 2:     cli
 
        /* Pop @svm to RDI, guest registers have been saved already. */
-       pop %_ASM_DI
+       pop %rdi
 
 #ifdef CONFIG_MITIGATION_RETPOLINE
        /* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-       FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+       FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
 #endif
 
        /* Clobbers RAX, RCX, RDX.  */
@@ -364,26 +351,21 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        UNTRAIN_RET_VM
 
        /* "Pop" @spec_ctrl_intercepted.  */
-       pop %_ASM_BX
+       pop %rbx
 
-       pop %_ASM_BX
+       pop %rbx
 
-#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
-#else
-       pop %esi
-       pop %edi
-#endif
-       pop %_ASM_BP
+       pop %rbp
        RET
 
        RESTORE_GUEST_SPEC_CTRL_BODY
        RESTORE_HOST_SPEC_CTRL_BODY
 
-3:     cmpb $0, _ASM_RIP(kvm_rebooting)
+3:     cmpb $0, kvm_rebooting(%rip)
        jne 2b
        ud2
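
The _ASM_RIP() conversion in the vmrun fixup path above is the same
simplification; the macro is roughly (again a sketch):

    #ifdef CONFIG_X86_64
    #define _ASM_RIP(x)	x(%rip)	/* 64-bit: RIP-relative addressing */
    #else
    #define _ASM_RIP(x)	x	/* 32-bit: absolute addressing */
    #endif

so "cmpb $0, kvm_rebooting(%rip)" is simply the macro's 64-bit expansion
written out by hand.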