mm/migration: add trace events for base page and HugeTLB migrations
Author:     Anshuman Khandual <anshuman.khandual@arm.com>
AuthorDate: Fri, 25 Mar 2022 01:10:01 +0000 (18:10 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Fri, 25 Mar 2022 02:06:45 +0000 (19:06 -0700)
This adds two trace events for base page and HugeTLB page migrations.
These events closely follow the implementation details, i.e. the setting
and removing of PTE migration entries, which are essential operations
for migration.  The new CREATE_TRACE_POINTS in mm/rmap.c covers trace
events from both <trace/events/migrate.h> and <trace/events/tlb.h>.
Hence drop the now-redundant CREATE_TRACE_POINTS from the other places,
which would otherwise cause duplicate definitions at link time.
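
For reference, the new events land under the "migrate" trace system in
tracefs.  The snippet below is a minimal userspace sketch, not part of
the patch itself: assuming tracefs is mounted at /sys/kernel/tracing and
the process runs as root, it enables both events and streams the
formatted records from trace_pipe.

	/*
	 * Illustrative only -- not part of this patch.  Enables the two
	 * new migrate trace events and streams records from trace_pipe.
	 * Assumes tracefs at /sys/kernel/tracing and root privileges.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	static void write_str(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			exit(EXIT_FAILURE);
		}
		fputs(val, f);
		fclose(f);
	}

	int main(void)
	{
		char line[1024];
		FILE *pipe;

		write_str("/sys/kernel/tracing/events/migrate/set_migration_pte/enable", "1");
		write_str("/sys/kernel/tracing/events/migrate/remove_migration_pte/enable", "1");

		pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
		if (!pipe) {
			perror("trace_pipe");
			return EXIT_FAILURE;
		}

		/* Each record carries addr, pte and order per TP_printk(). */
		while (fgets(line, sizeof(line), pipe))
			fputs(line, stdout);

		fclose(pipe);
		return 0;
	}

Triggering migration while this runs (e.g. via move_pages(2) or by
writing to /proc/sys/vm/compact_memory) should produce one
set_migration_pte record per PTE replaced and a matching
remove_migration_pte record once migration completes.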

Link: https://lkml.kernel.org/r/1643368182-9588-3-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reported-by: kernel test robot <lkp@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/mm/init.c
include/trace/events/migrate.h
mm/migrate.c
mm/rmap.c

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4ba024d5b63ae7b02fda4add0cac3945d8d02ee8..d8cfce221275e078e06a4371a31acad6467671b9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -31,7 +31,6 @@
  * We need to define the tracepoints somewhere, and tlb.c
  * is only compiled when SMP=y.
  */
-#define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
 
 #include "mm_internal.h"
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 779f3fad9ecd5419408ada5fdc17425058a9863d..061b5128f335a75267a78bcb1119c684eac59c01 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -105,6 +105,37 @@ TRACE_EVENT(mm_migrate_pages_start,
                  __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
 
+DECLARE_EVENT_CLASS(migration_pte,
+
+               TP_PROTO(unsigned long addr, unsigned long pte, int order),
+
+               TP_ARGS(addr, pte, order),
+
+               TP_STRUCT__entry(
+                       __field(unsigned long, addr)
+                       __field(unsigned long, pte)
+                       __field(int, order)
+               ),
+
+               TP_fast_assign(
+                       __entry->addr = addr;
+                       __entry->pte = pte;
+                       __entry->order = order;
+               ),
+
+               TP_printk("addr=%lx pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order)
+);
+
+DEFINE_EVENT(migration_pte, set_migration_pte,
+       TP_PROTO(unsigned long addr, unsigned long pte, int order),
+       TP_ARGS(addr, pte, order)
+);
+
+DEFINE_EVENT(migration_pte, remove_migration_pte,
+       TP_PROTO(unsigned long addr, unsigned long pte, int order),
+       TP_ARGS(addr, pte, order)
+);
+
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
diff --git a/mm/migrate.c b/mm/migrate.c
index 4f30ed37856f0ee5a066afc69d40994682f80f37..3d60823afd2d3171fafe7ec3e3279e7156850ddd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -53,7 +53,6 @@
 
 #include <asm/tlbflush.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/migrate.h>
 
 #include "internal.h"
@@ -249,6 +248,9 @@ static bool remove_migration_pte(struct folio *folio,
                if (vma->vm_flags & VM_LOCKED)
                        mlock_page_drain(smp_processor_id());
 
+               trace_remove_migration_pte(pvmw.address, pte_val(pte),
+                                          compound_order(new));
+
                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }
diff --git a/mm/rmap.c b/mm/rmap.c
index ee1f10df984da598f7427092e0e44c90bcd1016e..bfcc8e3d412f7c4562c28c7719916eaef406b80c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -76,7 +76,9 @@
 
 #include <asm/tlbflush.h>
 
+#define CREATE_TRACE_POINTS
 #include <trace/events/tlb.h>
+#include <trace/events/migrate.h>
 
 #include "internal.h"
 
@@ -1849,6 +1851,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        if (pte_swp_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
+                       trace_set_migration_pte(pvmw.address, pte_val(swp_pte),
+                                               compound_order(&folio->page));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.
@@ -1917,6 +1921,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                        if (pte_uffd_wp(pteval))
                                swp_pte = pte_swp_mkuffd_wp(swp_pte);
                        set_pte_at(mm, address, pvmw.pte, swp_pte);
+                       trace_set_migration_pte(address, pte_val(swp_pte),
+                                               compound_order(&folio->page));
                        /*
                         * No need to invalidate here it will synchronize on
                         * against the special swap migration pte.