KVM: x86/mmu: Add tracepoint for splitting huge pages
authorDavid Matlack <dmatlack@google.com>
Wed, 19 Jan 2022 23:07:38 +0000 (23:07 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 10 Feb 2022 18:50:43 +0000 (13:50 -0500)
Add a tracepoint that fires whenever KVM eagerly splits a huge page,
recording the error status of the split to indicate whether it
succeeded or failed and why.
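
For context (not part of this patch): once the event exists it can be
observed from userspace through tracefs. The sketch below is
illustrative only and assumes tracefs is mounted at
/sys/kernel/tracing; it enables kvmmmu:kvm_mmu_split_huge_page and
streams the formatted events from trace_pipe.

  /* Illustrative sketch only; error handling kept minimal. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      /* Assumes tracefs is mounted at /sys/kernel/tracing. */
      int fd = open("/sys/kernel/tracing/events/kvmmmu/"
                    "kvm_mmu_split_huge_page/enable", O_WRONLY);

      if (fd < 0) {
          perror("open enable");
          return 1;
      }
      if (write(fd, "1", 1) != 1)
          perror("enable event");
      close(fd);

      /* Stream formatted events as KVM splits huge pages. */
      FILE *fp = fopen("/sys/kernel/tracing/trace_pipe", "r");
      char line[512];

      if (!fp) {
          perror("open trace_pipe");
          return 1;
      }
      while (fgets(line, sizeof(line), fp))
          fputs(line, stdout);
      fclose(fp);
      return 0;
  }

Each printed event follows the TP_printk format added by this patch:
"gfn %llx spte %llx level %d errno %d".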

Reviewed-by: Peter Xu <peterx@redhat.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20220119230739.2234394-18-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmutrace.h
arch/x86/kvm/mmu/tdp_mmu.c

index de5e8e4e1aa7fc4b060fb1bde1118a8383e2eeb6..12247b96af012e893fa9101e7c0ae9d78b52a5d2 100644 (file)
@@ -416,6 +416,29 @@ TRACE_EVENT(
        )
 );
 
+TRACE_EVENT(
+       kvm_mmu_split_huge_page,
+       TP_PROTO(u64 gfn, u64 spte, int level, int errno),
+       TP_ARGS(gfn, spte, level, errno),
+
+       TP_STRUCT__entry(
+               __field(u64, gfn)
+               __field(u64, spte)
+               __field(int, level)
+               __field(int, errno)
+       ),
+
+       TP_fast_assign(
+               __entry->gfn = gfn;
+               __entry->spte = spte;
+               __entry->level = level;
+               __entry->errno = errno;
+       ),
+
+       TP_printk("gfn %llx spte %llx level %d errno %d",
+                 __entry->gfn, __entry->spte, __entry->level, __entry->errno)
+);
+
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
index dae2cebcf8b5c41d6f66f40d2635d8a2f318e78d..8def8f810cb003e9eb48becb41f6e0fd89f21fd8 100644 (file)
@@ -1347,7 +1347,7 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
         */
        ret = tdp_mmu_link_sp(kvm, iter, sp, false, shared);
        if (ret)
-               return ret;
+               goto out;
 
        /*
         * tdp_mmu_link_sp_atomic() will handle subtracting the huge page we
@@ -1356,7 +1356,9 @@ static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
         */
        kvm_update_page_stats(kvm, level - 1, PT64_ENT_PER_PAGE);
 
-       return 0;
+out:
+       trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
+       return ret;
 }
 
 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
@@ -1393,6 +1395,9 @@ retry:
                        sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
                        if (!sp) {
                                ret = -ENOMEM;
+                               trace_kvm_mmu_split_huge_page(iter.gfn,
+                                                             iter.old_spte,
+                                                             iter.level, ret);
                                break;
                        }
 
@@ -1416,7 +1421,6 @@ retry:
        if (sp)
                tdp_mmu_free_sp(sp);
 
-
        return ret;
 }