KVM: x86/mmu: Allow passing '-1' for "all" as_id for TDP MMU iterators
author     Sean Christopherson <seanjc@google.com>
           Thu, 11 Jan 2024 02:00:43 +0000 (18:00 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Fri, 23 Feb 2024 00:28:45 +0000 (16:28 -0800)
Modify for_each_tdp_mmu_root() and __for_each_tdp_mmu_root_yield_safe() to
accept -1 for _as_id to mean "process all memslot address spaces".  That
way, code that wants to process both the SMM and !SMM address spaces
doesn't need to iterate over roots twice (and likely copy+paste code in
the process).
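
As a rough sketch (not part of this patch; the per-arch loop bound
KVM_ADDRESS_SPACE_NUM and the loop bodies are placeholders), a caller that
previously walked roots once per memslot address space can instead pass -1
and make a single pass:

	struct kvm_mmu_page *root;
	int i;

	/* Before: one walk per address space (0 = !SMM, 1 = SMM on x86). */
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		for_each_tdp_mmu_root(kvm, root, i) {
			/* ... process root ... */
		}
	}

	/* After: -1 means "all address spaces", so one walk covers both. */
	for_each_tdp_mmu_root(kvm, root, -1) {
		/* ... process root ... */
	}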

Deliberately don't cast _as_id to an "int", just in case not casting helps
the compiler elide the "_as_id >= 0" check when the macro is passed an
unsigned value, e.g. a memslot's as_id.
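
A minimal illustration of that point (hypothetical helper, not from this
patch): when the macro argument is unsigned, such as a memslot's as_id,
the compiler can prove "_as_id >= 0" always holds and drop the comparison.

	/* Hypothetical caller; requires mmu_lock held for write. */
	static void walk_slot_roots(struct kvm *kvm,
				    const struct kvm_memory_slot *slot)
	{
		struct kvm_mmu_page *root;

		/* slot->as_id is unsigned, so the ">= 0" check folds away. */
		for_each_tdp_mmu_root(kvm, root, slot->as_id) {
			/* ... operate on this slot's address space ... */
		}
	}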

No functional change intended.

Link: https://lore.kernel.org/r/20240111020048.844847-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/tdp_mmu.c

index 68920877370b24d7f31a167c718f52186da29481..60fff2aad59edaae3392550839483efe67d3074e 100644
@@ -149,11 +149,11 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);        \
-            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
-            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))       \
-               if (kvm_mmu_page_as_id(_root) != _as_id) {              \
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)   \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);                \
+            ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;              \
+            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))               \
+               if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
                } else
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)    \
@@ -171,10 +171,10 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * Holding mmu_lock for write obviates the need for RCU protection as the list
  * is guaranteed to be stable.
  */
-#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                     \
-       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)     \
-               if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&    \
-                   kvm_mmu_page_as_id(_root) != _as_id) {              \
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id)                             \
+       list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)             \
+               if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&            \
+                   _as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
                } else
 
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)