tracing: Have persistent trace instances save module addresses
author		Steven Rostedt <rostedt@goodmis.org>
		Wed, 5 Mar 2025 16:45:45 +0000 (11:45 -0500)
committer	Steven Rostedt (Google) <rostedt@goodmis.org>
		Fri, 28 Mar 2025 12:39:27 +0000 (08:39 -0400)
For trace instances that are mapped to persistent memory, have them use
the scratch area to save the currently loaded modules. This lets the
next boot know where the modules had been loaded, so that addresses
recorded in the persistent trace can be deciphered by using where the
modules were loaded previously.

Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: https://lore.kernel.org/20250305164609.129741650@goodmis.org
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
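
To illustrate how the saved entries are meant to be consumed (a minimal
sketch, not code from this commit; lookup_prev_mod_addr() is a
hypothetical helper), the next boot could scan the persisted scratch
area for a module's previous load address and use it to decipher
recorded addresses:

	#include <linux/module.h>	/* MODULE_NAME_LEN */
	#include <linux/string.h>

	/* Mirrors the layout persisted by this patch */
	struct trace_mod_entry {
		unsigned long	mod_addr;
		char		mod_name[MODULE_NAME_LEN];
	};

	struct trace_scratch {
		unsigned long		kaslr_addr;
		unsigned long		nr_entries;
		struct trace_mod_entry	entries[];
	};

	/* Return where @name was loaded on the previous boot, or 0 */
	static unsigned long lookup_prev_mod_addr(struct trace_scratch *tscratch,
						  const char *name)
	{
		for (unsigned long i = 0; i < tscratch->nr_entries; i++) {
			if (!strcmp(tscratch->entries[i].mod_name, name))
				return tscratch->entries[i].mod_addr;
		}
		return 0;
	}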
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d4df56657b9da7646219b2a13f5bd030e88db8d1..a75e03994312f9e6fbf298633d8e53dfe7680e76 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -5988,14 +5988,60 @@ ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
        return __tracing_resize_ring_buffer(tr, size, cpu_id);
 }
 
+struct trace_mod_entry {
+       unsigned long   mod_addr;
+       char            mod_name[MODULE_NAME_LEN];
+};
+
 struct trace_scratch {
        unsigned long           kaslr_addr;
+       unsigned long           nr_entries;
+       struct trace_mod_entry  entries[];
 };
 
+static int save_mod(struct module *mod, void *data)
+{
+       struct trace_array *tr = data;
+       struct trace_scratch *tscratch;
+       struct trace_mod_entry *entry;
+       unsigned int size;
+
+       tscratch = tr->scratch;
+       if (!tscratch)
+               return -1;
+       size = tr->scratch_size;
+
+       if (struct_size(tscratch, entries, tscratch->nr_entries + 1) > size)
+               return -1;
+
+       entry = &tscratch->entries[tscratch->nr_entries];
+
+       tscratch->nr_entries++;
+
+       entry->mod_addr = (unsigned long)mod->mem[MOD_TEXT].base;
+       strscpy(entry->mod_name, mod->name);
+
+       return 0;
+}
+
 static void update_last_data(struct trace_array *tr)
 {
        struct trace_scratch *tscratch;
 
+       if (!(tr->flags & TRACE_ARRAY_FL_BOOT))
+               return;
+
+       /* Reset the module list and reload them */
+       if (tr->scratch) {
+               struct trace_scratch *tscratch = tr->scratch;
+
+               memset(tscratch->entries, 0,
+                      flex_array_size(tscratch, entries, tscratch->nr_entries));
+               tscratch->nr_entries = 0;
+
+               module_for_each_mod(save_mod, tr);
+       }
+
        if (!(tr->flags & TRACE_ARRAY_FL_LAST_BOOT))
                return;
 
@@ -9224,6 +9270,46 @@ static struct dentry *trace_instance_dir;
 static void
 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 
+static void setup_trace_scratch(struct trace_array *tr,
+                               struct trace_scratch *tscratch, unsigned int size)
+{
+       struct trace_mod_entry *entry;
+
+       if (!tscratch)
+               return;
+
+       tr->scratch = tscratch;
+       tr->scratch_size = size;
+
+#ifdef CONFIG_RANDOMIZE_BASE
+       if (tscratch->kaslr_addr)
+               tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
+#endif
+
+       if (struct_size(tscratch, entries, tscratch->nr_entries) > size)
+               goto reset;
+
+       /* Check if each module name is a valid string */
+       for (int i = 0; i < tscratch->nr_entries; i++) {
+               int n;
+
+               entry = &tscratch->entries[i];
+
+               for (n = 0; n < MODULE_NAME_LEN; n++) {
+                       if (entry->mod_name[n] == '\0')
+                               break;
+                       if (!isprint(entry->mod_name[n]))
+                               goto reset;
+               }
+               if (n == MODULE_NAME_LEN)
+                       goto reset;
+       }
+       return;
+ reset:
+       /* Invalid trace modules */
+       memset(tscratch, 0, size);
+}
+
 static int
 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
 {
@@ -9236,21 +9322,15 @@ allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size
        buf->tr = tr;
 
        if (tr->range_addr_start && tr->range_addr_size) {
+               /* Add scratch buffer to handle 128 modules */
                buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0,
                                                      tr->range_addr_start,
                                                      tr->range_addr_size,
-                                                     sizeof(*tscratch));
+                                                     struct_size(tscratch, entries, 128));
 
                tscratch = ring_buffer_meta_scratch(buf->buffer, &scratch_size);
-               if (tscratch) {
-                       tr->scratch = tscratch;
-                       tr->scratch_size = scratch_size;
+               setup_trace_scratch(tr, tscratch, scratch_size);
 
-#ifdef CONFIG_RANDOMIZE_BASE
-                       if (tscratch->kaslr_addr)
-                               tr->text_delta = kaslr_offset() - tscratch->kaslr_addr;
-#endif
-               }
                /*
                 * This is basically the same as a mapped buffer,
                 * with the same restrictions.
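
A note on the sizing helpers used above: struct_size() and
flex_array_size() come from <linux/overflow.h> and size a struct with a
flexible array member without risking integer overflow (they saturate
rather than wrap on overflow). Roughly:

	/* Header plus 128 trace_mod_entry slots for the scratch area */
	size = struct_size(tscratch, entries, 128);
	/* == sizeof(*tscratch) + 128 * sizeof(tscratch->entries[0]) */

	/* Just the flexible-array portion, as used to reset the list */
	memset(tscratch->entries, 0,
	       flex_array_size(tscratch, entries, tscratch->nr_entries));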