KVM: x86/tdp_mmu: Take struct kvm in iter loops
Author:     Isaku Yamahata <isaku.yamahata@intel.com>
AuthorDate: Thu, 18 Jul 2024 21:12:19 +0000 (14:12 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 23 Dec 2024 13:31:54 +0000 (08:31 -0500)
Add a struct kvm argument to the TDP MMU iterators.

Future changes will want to change how the iterator behaves based on a
member of struct kvm. Change the signature and callers of the iterator
loop helpers in a separate patch to make the future one easier to review.
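
For reference, a minimal sketch of the caller-visible change (illustration
only, not part of the patch): every iterator loop now also names the VM
instance, even though the macros do not consume it yet.

        /* Before this patch: */
        tdp_root_for_each_pte(iter, root, start, end) {
                /* visit each SPTE mapping [start, end) under root */
        }

        /* After this patch: */
        tdp_root_for_each_pte(iter, kvm, root, start, end) {
                /* same walk; kvm is only plumbed through for later use */
        }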

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Message-ID: <20240718211230.1492011-8-rick.p.edgecombe@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/tdp_iter.h
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index 2880fd392e0cbb9921aed5b8b5992c54e484530f..d8f2884e3c666bff60e7e69ac54acc1ba10b16b4 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -122,13 +122,13 @@ struct tdp_iter {
  * Iterates over every SPTE mapping the GFN range [start, end) in a
  * preorder traversal.
  */
-#define for_each_tdp_pte_min_level(iter, root, min_level, start, end) \
+#define for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end)               \
        for (tdp_iter_start(&iter, root, min_level, start); \
             iter.valid && iter.gfn < end;                   \
             tdp_iter_next(&iter))
 
-#define for_each_tdp_pte(iter, root, start, end) \
-       for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end)
+#define for_each_tdp_pte(iter, kvm, root, start, end)                          \
+       for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end)
 
 tdp_ptep_t spte_to_child_pt(u64 pte, int level);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index d29c06cc86d773006aaaa996ee71a920d016f41f..30eefc710aec3a040589ad41e747c2a3437b0c5c 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -625,18 +625,18 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                          iter->gfn, iter->level);
 }
 
-#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
-       for_each_tdp_pte(_iter, _root, _start, _end)
+#define tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)        \
+       for_each_tdp_pte(_iter, _kvm, _root, _start, _end)
 
-#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
-       tdp_root_for_each_pte(_iter, _root, _start, _end)               \
+#define tdp_root_for_each_leaf_pte(_iter, _kvm, _root, _start, _end)   \
+       tdp_root_for_each_pte(_iter, _kvm, _root, _start, _end)         \
                if (!is_shadow_present_pte(_iter.old_spte) ||           \
                    !is_last_spte(_iter.old_spte, _iter.level))         \
                        continue;                                       \
                else
 
-#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)                \
-       for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
+#define tdp_mmu_for_each_pte(_iter, _kvm, _mmu, _start, _end)          \
+       for_each_tdp_pte(_iter, _kvm, root_to_sp(_mmu->root.hpa), _start, _end)
 
 static inline bool __must_check tdp_mmu_iter_need_resched(struct kvm *kvm,
                                                          struct tdp_iter *iter)
@@ -708,7 +708,7 @@ static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
        gfn_t end = tdp_mmu_max_gfn_exclusive();
        gfn_t start = 0;
 
-       for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
+       for_each_tdp_pte_min_level(iter, kvm, root, zap_level, start, end) {
 retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
                        continue;
@@ -812,7 +812,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 
        rcu_read_lock();
 
-       for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
+       for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_4K, start, end) {
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
                        flush = false;
@@ -1086,7 +1086,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 
        rcu_read_lock();
 
-       tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
+       tdp_mmu_for_each_pte(iter, kvm, mmu, fault->gfn, fault->gfn + 1) {
                int r;
 
                if (fault->nx_huge_page_workaround_enabled)
@@ -1212,7 +1212,7 @@ static bool __kvm_tdp_mmu_age_gfn_range(struct kvm *kvm,
        for_each_valid_tdp_mmu_root(kvm, root, range->slot->as_id) {
                guard(rcu)();
 
-               tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) {
+               tdp_root_for_each_leaf_pte(iter, kvm, root, range->start, range->end) {
                        if (!is_accessed_spte(iter.old_spte))
                                continue;
 
@@ -1253,7 +1253,7 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
        BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
 
-       for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
+       for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end) {
 retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
                        continue;
@@ -1372,7 +1372,7 @@ static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
         * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
         * and then splitting each of those to 512 4KB pages).
         */
-       for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
+       for_each_tdp_pte_min_level(iter, kvm, root, target_level + 1, start, end) {
 retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
                        continue;
@@ -1470,7 +1470,7 @@ static void clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 
        rcu_read_lock();
 
-       tdp_root_for_each_pte(iter, root, start, end) {
+       tdp_root_for_each_pte(iter, kvm, root, start, end) {
 retry:
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
@@ -1518,7 +1518,7 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
 
        rcu_read_lock();
 
-       tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
+       tdp_root_for_each_leaf_pte(iter, kvm, root, gfn + __ffs(mask),
                                    gfn + BITS_PER_LONG) {
                if (!mask)
                        break;
@@ -1572,7 +1572,7 @@ static int tdp_mmu_make_huge_spte(struct kvm *kvm,
        gfn_t end = start + KVM_PAGES_PER_HPAGE(parent->level);
        struct tdp_iter iter;
 
-       tdp_root_for_each_leaf_pte(iter, root, start, end) {
+       tdp_root_for_each_leaf_pte(iter, kvm, root, start, end) {
                /*
                 * Use the parent iterator when checking for forward progress so
                 * that KVM doesn't get stuck continuously trying to yield (i.e.
@@ -1606,7 +1606,7 @@ static void recover_huge_pages_range(struct kvm *kvm,
 
        rcu_read_lock();
 
-       for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
+       for_each_tdp_pte_min_level(iter, kvm, root, PG_LEVEL_2M, start, end) {
 retry:
                if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
                        flush = false;
@@ -1687,7 +1687,7 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
 
        rcu_read_lock();
 
-       for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
+       for_each_tdp_pte_min_level(iter, kvm, root, min_level, gfn, gfn + 1) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
@@ -1742,7 +1742,7 @@ int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 
        *root_level = vcpu->arch.mmu->root_role.level;
 
-       tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+       tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
                leaf = iter.level;
                sptes[leaf] = iter.old_spte;
        }
@@ -1768,7 +1768,7 @@ u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        tdp_ptep_t sptep = NULL;
 
-       tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+       tdp_mmu_for_each_pte(iter, vcpu->kvm, mmu, gfn, gfn + 1) {
                *spte = iter.old_spte;
                sptep = iter.sptep;
        }
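
As a hedged illustration of where this plumbing is headed (not part of this
patch; the helper below is an assumption, not a real API), a later change
could make the walk depend on a struct kvm member, for example by offsetting
the GFN range with a per-VM value:

        /*
         * Hypothetical sketch, illustration only: kvm_gfn_offset() is an
         * assumed helper.  The existing tdp_iter_start()/tdp_iter_next()
         * calls are unchanged; only the start/end GFNs are derived from kvm.
         */
        #define for_each_tdp_pte_min_level(iter, kvm, root, min_level, start, end)    \
                for (tdp_iter_start(&iter, root, min_level,                            \
                                    (start) | kvm_gfn_offset(kvm));                    \
                     iter.valid && iter.gfn < ((end) | kvm_gfn_offset(kvm));           \
                     tdp_iter_next(&iter))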