KVM: VMX: Combine "check" and "get" APIs for passthrough MSR lookups
Author: Sean Christopherson <seanjc@google.com>
Date:   Fri, 23 Feb 2024 20:21:04 +0000 (12:21 -0800)
Committer: Sean Christopherson <seanjc@google.com>
Date:      Tue, 27 Feb 2024 20:29:46 +0000 (12:29 -0800)
Combine possible_passthrough_msr_slot() and is_valid_passthrough_msr()
into a single function, vmx_get_passthrough_msr_slot(), and have the
combined helper return the slot on success, using a negative value to
indicate "failure".

Combining the operations avoids iterating over the array of passthrough
MSRs twice for relevant MSRs.

Suggested-by: Dongli Zhang <dongli.zhang@oracle.com>
Reviewed-by: Dongli Zhang <dongli.zhang@oracle.com>
Link: https://lore.kernel.org/r/20240223202104.3330974-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c

index 5a866d3c2bc861a6bc676a375ef2262c8ea74664..7e7d044e06696f94909d88ec4541f2ca25898824 100644 (file)
@@ -658,25 +658,14 @@ static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
        return flexpriority_enabled && lapic_in_kernel(vcpu);
 }
 
-static int possible_passthrough_msr_slot(u32 msr)
+static int vmx_get_passthrough_msr_slot(u32 msr)
 {
-       u32 i;
-
-       for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++)
-               if (vmx_possible_passthrough_msrs[i] == msr)
-                       return i;
-
-       return -ENOENT;
-}
-
-static bool is_valid_passthrough_msr(u32 msr)
-{
-       bool r;
+       int i;
 
        switch (msr) {
        case 0x800 ... 0x8ff:
                /* x2APIC MSRs. These are handled in vmx_update_msr_bitmap_x2apic() */
-               return true;
+               return -ENOENT;
        case MSR_IA32_RTIT_STATUS:
        case MSR_IA32_RTIT_OUTPUT_BASE:
        case MSR_IA32_RTIT_OUTPUT_MASK:
@@ -691,14 +680,16 @@ static bool is_valid_passthrough_msr(u32 msr)
        case MSR_LBR_CORE_FROM ... MSR_LBR_CORE_FROM + 8:
        case MSR_LBR_CORE_TO ... MSR_LBR_CORE_TO + 8:
                /* LBR MSRs. These are handled in vmx_update_intercept_for_lbr_msrs() */
-               return true;
+               return -ENOENT;
        }
 
-       r = possible_passthrough_msr_slot(msr) != -ENOENT;
-
-       WARN(!r, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+       for (i = 0; i < ARRAY_SIZE(vmx_possible_passthrough_msrs); i++) {
+               if (vmx_possible_passthrough_msrs[i] == msr)
+                       return i;
+       }
 
-       return r;
+       WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
+       return -ENOENT;
 }
 
 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
@@ -3954,6 +3945,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+       int idx;
 
        if (!cpu_has_vmx_msr_bitmap())
                return;
@@ -3963,16 +3955,13 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
        /*
         * Mark the desired intercept state in shadow bitmap, this is needed
         * for resync when the MSR filters change.
-       */
-       if (is_valid_passthrough_msr(msr)) {
-               int idx = possible_passthrough_msr_slot(msr);
-
-               if (idx != -ENOENT) {
-                       if (type & MSR_TYPE_R)
-                               clear_bit(idx, vmx->shadow_msr_intercept.read);
-                       if (type & MSR_TYPE_W)
-                               clear_bit(idx, vmx->shadow_msr_intercept.write);
-               }
+        */
+       idx = vmx_get_passthrough_msr_slot(msr);
+       if (idx >= 0) {
+               if (type & MSR_TYPE_R)
+                       clear_bit(idx, vmx->shadow_msr_intercept.read);
+               if (type & MSR_TYPE_W)
+                       clear_bit(idx, vmx->shadow_msr_intercept.write);
        }
 
        if ((type & MSR_TYPE_R) &&
@@ -3998,6 +3987,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+       int idx;
 
        if (!cpu_has_vmx_msr_bitmap())
                return;
@@ -4007,16 +3997,13 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
        /*
         * Mark the desired intercept state in shadow bitmap, this is needed
         * for resync when the MSR filter changes.
-       */
-       if (is_valid_passthrough_msr(msr)) {
-               int idx = possible_passthrough_msr_slot(msr);
-
-               if (idx != -ENOENT) {
-                       if (type & MSR_TYPE_R)
-                               set_bit(idx, vmx->shadow_msr_intercept.read);
-                       if (type & MSR_TYPE_W)
-                               set_bit(idx, vmx->shadow_msr_intercept.write);
-               }
+        */
+       idx = vmx_get_passthrough_msr_slot(msr);
+       if (idx >= 0) {
+               if (type & MSR_TYPE_R)
+                       set_bit(idx, vmx->shadow_msr_intercept.read);
+               if (type & MSR_TYPE_W)
+                       set_bit(idx, vmx->shadow_msr_intercept.write);
        }
 
        if (type & MSR_TYPE_R)