KVM: x86/tdp_mmu: Introduce KVM MMU root types to specify page table type
author     Isaku Yamahata <isaku.yamahata@intel.com>
           Thu, 18 Jul 2024 21:12:22 +0000 (14:12 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 23 Dec 2024 13:31:54 +0000 (08:31 -0500)
Define an enum kvm_tdp_mmu_root_types to specify the KVM MMU root type [1]
so that the iterators over root page tables can consistently filter on the
root page table type instead of a bool only_valid.

TDX KVM will operate on KVM page tables of specified types: shared page
tables, private page tables, or both.  Introduce an enum instead of a bool
only_valid so that page table types applicable to shared, private, or both
can be added easily later, in addition to valid or not.  Replace
only_valid=false with KVM_ALL_ROOTS and only_valid=true with
KVM_VALID_ROOTS.  Define KVM_ALL_ROOTS in terms of KVM_VALID_ROOTS to avoid
further code churn when direct vs mirror root concepts are introduced in
future patches.
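
For illustration only (not part of the patch), a standalone userspace model
of the new filtering logic shows how the enum bits combine.  The struct and
function names below are hypothetical stand-ins for struct kvm_mmu_page and
tdp_mmu_root_match():

  #include <assert.h>
  #include <stdbool.h>
  #include <stdio.h>

  #define BIT(n) (1u << (n))

  enum kvm_tdp_mmu_root_types {
          KVM_INVALID_ROOTS = BIT(0),
          KVM_VALID_ROOTS = BIT(1),
          KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
  };

  /* Hypothetical stand-in for struct kvm_mmu_page's role.invalid bit. */
  struct root_model {
          bool invalid;
  };

  /*
   * Mirrors the reworked tdp_mmu_root_match(): callers must always accept
   * valid roots; invalid roots match only when KVM_INVALID_ROOTS is set.
   */
  static bool root_match(const struct root_model *root,
                         enum kvm_tdp_mmu_root_types types)
  {
          assert(types & KVM_VALID_ROOTS); /* WARN_ON_ONCE() in the kernel */

          if (root->invalid && !(types & KVM_INVALID_ROOTS))
                  return false;

          return true;
  }

  int main(void)
  {
          struct root_model valid = { .invalid = false };
          struct root_model stale = { .invalid = true };

          printf("%d\n", root_match(&valid, KVM_VALID_ROOTS)); /* 1: kept */
          printf("%d\n", root_match(&stale, KVM_VALID_ROOTS)); /* 0: skipped */
          printf("%d\n", root_match(&stale, KVM_ALL_ROOTS));   /* 1: kept */
          return 0;
  }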

Link: https://lore.kernel.org/kvm/ZivazWQw1oCU8VBC@google.com/ [1]
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Message-ID: <20240718211230.1492011-11-rick.p.edgecombe@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index e0ccfdd4200bd1a9d1c86e0f5a9210ca74cc61f1..9fbf4770ba3e6f14aa2f0042e91345b866fd6820 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -92,9 +92,13 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
 
-static bool tdp_mmu_root_match(struct kvm_mmu_page *root, bool only_valid)
+static bool tdp_mmu_root_match(struct kvm_mmu_page *root,
+                              enum kvm_tdp_mmu_root_types types)
 {
-       if (only_valid && root->role.invalid)
+       if (WARN_ON_ONCE(!(types & KVM_VALID_ROOTS)))
+               return false;
+
+       if (root->role.invalid && !(types & KVM_INVALID_ROOTS))
                return false;
 
        return true;
@@ -102,17 +106,17 @@ static bool tdp_mmu_root_match(struct kvm_mmu_page *root, bool only_valid)
 
 /*
  * Returns the next root after @prev_root (or the first root if @prev_root is
- * NULL).  A reference to the returned root is acquired, and the reference to
- * @prev_root is released (the caller obviously must hold a reference to
- * @prev_root if it's non-NULL).
+ * NULL) that matches @types.  A reference to the returned root is
+ * acquired, and the reference to @prev_root is released (the caller obviously
+ * must hold a reference to @prev_root if it's non-NULL).
  *
- * If @only_valid is true, invalid roots are skipped.
+ * Roots that don't match @types are skipped.
  *
  * Returns NULL if the end of tdp_mmu_roots was reached.
  */
 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                              struct kvm_mmu_page *prev_root,
-                                             bool only_valid)
+                                             enum kvm_tdp_mmu_root_types types)
 {
        struct kvm_mmu_page *next_root;
 
@@ -133,7 +137,7 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                                   typeof(*next_root), link);
 
        while (next_root) {
-               if (tdp_mmu_root_match(next_root, only_valid) &&
+               if (tdp_mmu_root_match(next_root, types) &&
                    kvm_tdp_mmu_get_root(next_root))
                        break;
 
@@ -158,20 +162,20 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * If shared is set, this function is operating under the MMU lock in read
  * mode.
  */
-#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)   \
-       for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid);                \
+#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _types)        \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, _types);             \
             ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;              \
-            _root = tdp_mmu_next_root(_kvm, _root, _only_valid))               \
+            _root = tdp_mmu_next_root(_kvm, _root, _types))            \
                if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) {       \
                } else
 
 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id)    \
-       __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, true)
+       __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, KVM_VALID_ROOTS)
 
 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                  \
-       for (_root = tdp_mmu_next_root(_kvm, NULL, false);              \
+       for (_root = tdp_mmu_next_root(_kvm, NULL, KVM_ALL_ROOTS);              \
             ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root;      \
-            _root = tdp_mmu_next_root(_kvm, _root, false))
+            _root = tdp_mmu_next_root(_kvm, _root, KVM_ALL_ROOTS))
 
 /*
  * Iterate over all TDP MMU roots.  Requires that mmu_lock be held for write,
@@ -180,18 +184,18 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
  * Holding mmu_lock for write obviates the need for RCU protection as the list
  * is guaranteed to be stable.
  */
-#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid)              \
+#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _types)                   \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)             \
                if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) &&            \
                    ((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) ||    \
-                    !tdp_mmu_root_match((_root), (_only_valid)))) {            \
+                    !tdp_mmu_root_match((_root), (_types)))) {                 \
                } else
 
 #define for_each_tdp_mmu_root(_kvm, _root, _as_id)                     \
-       __for_each_tdp_mmu_root(_kvm, _root, _as_id, false)
+       __for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_ALL_ROOTS)
 
 #define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id)               \
-       __for_each_tdp_mmu_root(_kvm, _root, _as_id, true)
+       __for_each_tdp_mmu_root(_kvm, _root, _as_id, KVM_VALID_ROOTS)
 
 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
 {
@@ -1164,7 +1168,7 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
 {
        struct kvm_mmu_page *root;
 
-       __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+       __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, KVM_ALL_ROOTS)
                flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
                                          range->may_block, flush);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 51884fc6a512705ada2ba06a0b2090d99c77ebc9..a2028d036c0c2eb5d8020910b82cc3376077bb1d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -19,6 +19,13 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
 
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
+enum kvm_tdp_mmu_root_types {
+       KVM_INVALID_ROOTS = BIT(0),
+
+       KVM_VALID_ROOTS = BIT(1),
+       KVM_ALL_ROOTS = KVM_VALID_ROOTS | KVM_INVALID_ROOTS,
+};
+
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
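
For reference, after this change a root walker selects its scope with the
enum rather than a bool.  A minimal sketch, mirroring the pattern of
kvm_tdp_mmu_zap_all() (the wrapper name tdp_mmu_walk_all_roots is
hypothetical):

  /* Sketch: visit every root, valid and invalid, under the write lock. */
  static void tdp_mmu_walk_all_roots(struct kvm *kvm)
  {
          struct kvm_mmu_page *root;

          lockdep_assert_held_write(&kvm->mmu_lock);

          /* Expands to tdp_mmu_next_root(..., KVM_ALL_ROOTS) per this patch. */
          for_each_tdp_mmu_root_yield_safe(kvm, root)
                  tdp_mmu_zap_root(kvm, root, false);
  }

A path that must skip invalid roots would instead use
for_each_valid_tdp_mmu_root_yield_safe(), which now passes KVM_VALID_ROOTS.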