// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/fsnotify.h>
#include <linux/irq_work.h>
#include <linux/workqueue.h>

#include <asm/setup.h> /* COMMAND_LINE_SIZE */

#include "trace.h"
#include "trace_output.h"
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer (such as trace_printk()) could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If boot-time tracing (including tracers/events set via the kernel
 * command line) is running, we do not want to run the selftests.
 */
bool __read_mostly tracing_selftest_disabled;

void __init disable_tracing_selftest(const char *reason)
{
	if (!tracing_selftest_disabled) {
		tracing_selftest_disabled = true;
		pr_info("Ftrace startup test is disabled due to %s\n", reason);
	}
}
#else
#define tracing_selftest_running	0
#define tracing_selftest_disabled	0
#endif
/* Pipe tracepoints to printk */
static struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
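
/*
 * Example usage (a sketch of the knobs described in the comment above):
 *
 *	# boot time: dump all CPU buffers on an oops
 *	ftrace_dump_on_oops
 *
 *	# run time: only dump the buffer of the CPU that triggered the oops
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops
 */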
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);

/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
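
/*
 * Illustrative layout of one such saved array (a sketch of the comment
 * above, not a new data structure):
 *
 *	trace_eval_maps --> [ head ][ map 0 ] ... [ map N-1 ][ tail ]
 *	  head.length = N, head.mod = owning module (or NULL if built in)
 *	  tail.next   = pointer to the next saved array (or NULL)
 */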
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
				   struct trace_buffer *buffer,
				   unsigned int trace_ctx);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static bool snapshot_at_boot;

static char boot_instance_info[COMMAND_LINE_SIZE] __initdata;
static int boot_instance_index;

static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata;
static int boot_snapshot_index;
static int __init set_cmdline_ftrace(char *str)
{
	strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	trace_set_ring_buffer_expanded(NULL);
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str || !strcmp("1", str)) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	char *slot = boot_snapshot_info + boot_snapshot_index;
	int left = sizeof(boot_snapshot_info) - boot_snapshot_index;
	int ret;

	if (str[0] == '=') {
		str++;
		if (strlen(str) >= left)
			return -1;

		ret = snprintf(slot, left, "%s\t", str);
		boot_snapshot_index += ret;
	} else {
		allocate_snapshot = true;
		/* We also need the main ring buffer expanded */
		trace_set_ring_buffer_expanded(NULL);
	}
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static int __init boot_snapshot(char *str)
{
	snapshot_at_boot = true;
	boot_alloc_snapshot(str);
	return 1;
}
__setup("ftrace_boot_snapshot", boot_snapshot);

static int __init boot_instance(char *str)
{
	char *slot = boot_instance_info + boot_instance_index;
	int left = sizeof(boot_instance_info) - boot_instance_index;
	int ret;

	if (strlen(str) >= left)
		return -1;

	ret = snprintf(slot, left, "%s\t", str);
	boot_instance_index += ret;

	return 1;
}
__setup("trace_instance=", boot_instance);
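
/*
 * Example (a sketch; see Documentation/admin-guide/kernel-parameters.txt
 * for the authoritative syntax): creating a tracing instance from the
 * kernel command line, which then shows up under
 * /sys/kernel/tracing/instances/:
 *
 *	trace_instance=foo
 */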
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 1;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 1;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	/* Ignore the "tp_printk_stop_on_boot" param */
	if (*str == '_')
		return 0;

	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

static int __init set_tracepoint_printk_stop(char *str)
{
	tracepoint_printk_stop_on_boot = true;
	return 1;
}
__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
unsigned long long ns2usecs(u64 nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event, int flag)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	if (export->flags & flag) {
		entry = ring_buffer_event_data(event);
		size = ring_buffer_event_length(event);
		export->write(export, entry, size);
	}
}
static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);

static inline void ftrace_exports_enable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_inc(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_inc(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_inc(&trace_marker_exports_enabled);
}

static inline void ftrace_exports_disable(struct trace_export *export)
{
	if (export->flags & TRACE_EXPORT_FUNCTION)
		static_branch_dec(&trace_function_exports_enabled);

	if (export->flags & TRACE_EXPORT_EVENT)
		static_branch_dec(&trace_event_exports_enabled);

	if (export->flags & TRACE_EXPORT_MARKER)
		static_branch_dec(&trace_marker_exports_enabled);
}

static void ftrace_exports(struct ring_buffer_event *event, int flag)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_check(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event, flag);
		export = rcu_dereference_raw_check(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	ftrace_exports_enable(export);

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	ftrace_exports_disable(export);

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
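
/*
 * Usage sketch (illustrative only; the callback and variable names are
 * hypothetical): a module can mirror trace data to its own sink by
 * registering a trace_export with a write() callback and the flags it
 * cares about:
 *
 *	static void my_export_write(struct trace_export *export,
 *				    const void *entry, unsigned int size)
 *	{
 *		// forward @size bytes of the binary trace entry to a sink
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write	= my_export_write,
 *		.flags	= TRACE_EXPORT_FUNCTION | TRACE_EXPORT_EVENT,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */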
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS |			\
	 TRACE_ITER_HASH_PTR)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)

/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};
void trace_set_ring_buffer_expanded(struct trace_array *tr)
{
	if (!tr)
		tr = &global_trace;
	tr->ring_buffer_expanded = true;
}

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}
/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
{
	if (!this_tr)
		return;

	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
EXPORT_SYMBOL_GPL(trace_array_put);

int tracing_check_open_get_tr(struct trace_array *tr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	if (tracing_disabled)
		return -ENODEV;

	if (tr && trace_array_get(tr) < 0)
		return -ENODEV;

	return 0;
}

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct trace_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		__trace_event_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
bool
trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
{
	return trace_pid_list_is_set(filtered_pids, search_pid);
}

/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @filtered_no_pids: The list of pids not to be traced
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids,
		       struct trace_pid_list *filtered_no_pids,
		       struct task_struct *task)
{
	/*
	 * If filtered_no_pids is not empty, and the task's pid is listed
	 * in filtered_no_pids, then return true.
	 * Otherwise, if filtered_pids is empty, that means we can
	 * trace all tasks. If it has content, then only trace pids
	 * within filtered_pids.
	 */

	return (filtered_pids &&
		!trace_find_filtered_pid(filtered_pids, task->pid)) ||
		(filtered_no_pids &&
		 trace_find_filtered_pid(filtered_no_pids, task->pid));
}
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task)
{
	if (!pid_list)
		return;

	/* For forks, we only add if the forking task is listed */
	if (self) {
		if (!trace_find_filtered_pid(pid_list, self->pid))
			return;
	}

	/* "self" is set for forks, and NULL for exits */
	if (self)
		trace_pid_list_set(pid_list, task->pid);
	else
		trace_pid_list_clear(pid_list, task->pid);
}
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
{
	long pid = (unsigned long)v;
	unsigned int next;

	(*pos)++;

	/* pid already is +1 of the actual previous bit */
	if (trace_pid_list_next(pid_list, pid, &next) < 0)
		return NULL;

	pid = next;

	/* Return pid + 1 to allow zero to be represented */
	return (void *)(pid + 1);
}

/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by seq_file "start" operation to start the iteration
 * of listing pids.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
{
	unsigned long pid;
	unsigned int first;
	loff_t l = 0;

	if (trace_pid_list_first(pid_list, &first) < 0)
		return NULL;

	pid = first;

	/* Return pid + 1 so that zero can be the exit value */
	for (pid++; pid && l < *pos;
	     pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
		;
	return (void *)pid;
}

/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);
	return 0;
}
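
/*
 * Sketch of how the three helpers above are wired into a seq_file (the
 * f_* wrapper names and the pid_list source are hypothetical; the ftrace
 * pid filter files follow this pattern). ->start/->next hide the +1
 * encoding, and trace_pid_show() can be used for ->show directly:
 *
 *	static void *f_start(struct seq_file *m, loff_t *pos)
 *	{
 *		return trace_pid_start(pid_list, pos);
 *	}
 *
 *	static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 *	{
 *		return trace_pid_next(pid_list, v, pos);
 *	}
 *
 *	static const struct seq_operations f_seq_ops = {
 *		.start	= f_start,
 *		.next	= f_next,
 *		.stop	= f_stop,
 *		.show	= trace_pid_show,
 *	};
 */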
/* 128 should be much more than enough */
#define PID_BUF_SIZE		127

int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt)
{
	struct trace_pid_list *pid_list;
	struct trace_parser parser;
	unsigned long val;
	int nr_pids = 0;
	ssize_t read = 0;
	ssize_t ret;
	loff_t pos;
	pid_t pid;

	if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
	if (!pid_list) {
		trace_parser_put(&parser);
		return -ENOMEM;
	}

	if (filtered_pids) {
		/* copy the current bits to the new max */
		ret = trace_pid_list_first(filtered_pids, &pid);
		while (!ret) {
			trace_pid_list_set(pid_list, pid);
			ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
			nr_pids++;
		}
	}

	ret = 0;
	while (cnt > 0) {

		pos = 0;

		ret = trace_get_user(&parser, ubuf, cnt, &pos);
		if (ret < 0)
			break;

		read += ret;
		ubuf += ret;
		cnt -= ret;

		if (!trace_parser_loaded(&parser))
			break;

		ret = -EINVAL;
		if (kstrtoul(parser.buffer, 0, &val))
			break;

		pid = (pid_t)val;

		if (trace_pid_list_set(pid_list, pid) < 0) {
			ret = -1;
			break;
		}
		nr_pids++;

		trace_parser_clear(&parser);
		ret = 0;
	}
	trace_parser_put(&parser);

	if (ret < 0) {
		trace_pid_list_free(pid_list);
		return ret;
	}

	if (!nr_pids) {
		/* Cleared the list of pids */
		trace_pid_list_free(pid_list);
		pid_list = NULL;
	}

	*new_pid_list = pid_list;

	return read;
}
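
/*
 * Usage sketch (illustrative; it follows the pattern of the event pid
 * filter, not a literal copy): a write handler builds the new list and
 * only then publishes it, so concurrent readers never see a half-written
 * list:
 *
 *	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_pointer(tr->filtered_pids, pid_list);
 *	synchronize_rcu();
 *	trace_pid_list_free(filtered_pids);
 */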
static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

u64 ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.array_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it is much appreciated not
 * to have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only from read-consume access.
 * Multiple read-only accesses are also serialized.
 */
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
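
/*
 * Typical usage of the primitives above (a sketch): a reader of one cpu
 * buffer, or of all of them with RING_BUFFER_ALL_CPUS:
 *
 *	trace_access_lock(cpu);
 *	... consume events from the cpu (or whole) ring buffer ...
 *	trace_access_unlock(cpu);
 */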
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
				 unsigned int trace_ctx,
				 int skip, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
					unsigned int trace_ctx,
					int skip, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct trace_buffer *buffer,
				      unsigned int trace_ctx,
				      int skip, struct pt_regs *regs)
{
}

#endif
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
		  int type, unsigned int trace_ctx)
{
	struct trace_entry *ent = ring_buffer_event_data(event);

	tracing_generic_entry_update(ent, type, trace_ctx);
}

static __always_inline struct ring_buffer_event *
__trace_buffer_lock_reserve(struct trace_buffer *buffer,
			    int type,
			    unsigned long len,
			    unsigned int trace_ctx)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL)
		trace_event_setup(event, type, trace_ctx);

	return event;
}
void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
static __always_inline void
__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_taskinfo_save, true);

	/* If this is the temp buffer, we need to commit fully */
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Length is in event->array[0] */
		ring_buffer_write(buffer, event->array[0], &event->array[1]);
		/* Release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		/* ring_buffer_unlock_commit() enables preemption */
		preempt_enable_notrace();
	} else
		ring_buffer_unlock_commit(buffer);
}
int __trace_array_puts(struct trace_array *tr, unsigned long ip,
		       const char *str, int size)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct print_entry *entry;
	unsigned int trace_ctx;
	int alloc;

	if (!(tr->trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running && tr == &global_trace))
		return 0;

	if (unlikely(tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	trace_ctx = tracing_gen_ctx();
	buffer = tr->array_buffer.buffer;
	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					    trace_ctx);
	if (!event) {
		size = 0;
		goto out;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL);
 out:
	ring_buffer_nest_end(buffer);
	return size;
}
EXPORT_SYMBOL_GPL(__trace_array_puts);
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	return __trace_array_puts(&global_trace, ip, str, size);
}
EXPORT_SYMBOL_GPL(__trace_puts);
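
/*
 * Callers normally reach this through the trace_puts() macro, which picks
 * __trace_bputs() for built-in constant strings and __trace_puts()
 * otherwise. A quick debugging sketch:
 *
 *	trace_puts("reached the retry path\n");
 */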
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct trace_buffer *buffer;
	struct bputs_entry *entry;
	unsigned int trace_ctx;
	int size = sizeof(struct bputs_entry);
	int ret = 0;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	trace_ctx = tracing_gen_ctx();
	buffer = global_trace.array_buffer.buffer;

	ring_buffer_nest_start(buffer);
	event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					    trace_ctx);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);

	ret = 1;
 out:
	ring_buffer_nest_end(buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
					   void *cond_data)
{
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		trace_array_puts(tr, "*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_array_puts(tr, "*** stopping trace here!   ***\n");
		tracer_tracing_off(tr);
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n");
		trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id(), cond_data);
	local_irq_restore(flags);
}

void tracing_snapshot_instance(struct trace_array *tr)
{
	tracing_snapshot_instance_cond(tr, NULL);
}
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;

	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
/**
 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
 * @tr:		The tracing instance to snapshot
 * @cond_data:	The data to be tested conditionally, and possibly saved
 *
 * This is the same as tracing_snapshot() except that the snapshot is
 * conditional - the snapshot will only happen if the
 * cond_snapshot.update() implementation receiving the cond_data
 * returns true, which means that the trace array's cond_snapshot
 * update() operation used the cond_data to determine whether the
 * snapshot should be taken, and if it was, presumably saved it along
 * with the snapshot.
 */
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	tracing_snapshot_instance_cond(tr, cond_data);
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
/**
 * tracing_cond_snapshot_data - get the user data associated with a snapshot
 * @tr:		The tracing instance
 *
 * When the user enables a conditional snapshot using
 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
 * with the snapshot. This accessor is used to retrieve it.
 *
 * Should not be called from cond_snapshot.update(), since it takes
 * the tr->max_lock lock, which the code calling
 * cond_snapshot.update() has already done.
 *
 * Returns the cond_data associated with the trace array's snapshot.
 */
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	void *cond_data = NULL;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (tr->cond_snapshot)
		cond_data = tr->cond_snapshot->cond_data;

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return cond_data;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
					struct array_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct array_buffer *buf, unsigned long val);

int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->array_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because the
	 * max_tr ring buffer has some state (e.g. ring->clock) that we
	 * want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = tracing_alloc_snapshot_instance(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
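
/*
 * Usage sketch (the trigger condition is hypothetical): allocate the
 * spare buffer once from a context that may sleep, then snapshot from
 * wherever the condition of interest fires:
 *
 *	tracing_alloc_snapshot();	// may sleep
 *	...
 *	if (hit_condition)
 *		tracing_snapshot();	// does not sleep (but not from NMI)
 */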
/**
 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
 * @tr:		The tracing instance
 * @cond_data:	User data to associate with the snapshot
 * @update:	Implementation of the cond_snapshot update function
 *
 * Check whether the conditional snapshot for the given instance has
 * already been enabled, or if the current tracer is already using a
 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
 * save the cond_data and update function inside.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
				 cond_update_fn_t update)
{
	struct cond_snapshot *cond_snapshot;
	int ret = 0;

	cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
	if (!cond_snapshot)
		return -ENOMEM;

	cond_snapshot->cond_data = cond_data;
	cond_snapshot->update = update;

	mutex_lock(&trace_types_lock);

	ret = tracing_alloc_snapshot_instance(tr);
	if (ret)
		goto fail_unlock;

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	/*
	 * The cond_snapshot can only change to NULL without the
	 * trace_types_lock. We don't care if we race with it going
	 * to NULL, but we want to make sure that it's not set to
	 * something other than NULL when we get here, which we can
	 * do safely with only holding the trace_types_lock and not
	 * having to take the max_lock.
	 */
	if (tr->cond_snapshot) {
		ret = -EBUSY;
		goto fail_unlock;
	}

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	tr->cond_snapshot = cond_snapshot;
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	mutex_unlock(&trace_types_lock);

	return ret;

 fail_unlock:
	mutex_unlock(&trace_types_lock);
	kfree(cond_snapshot);
	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
/**
 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
 * @tr:		The tracing instance
 *
 * Check whether the conditional snapshot for the given instance is
 * enabled; if so, free the cond_snapshot associated with it,
 * otherwise return -EINVAL.
 *
 * Returns 0 if successful, error otherwise.
 */
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	int ret = 0;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);

	if (!tr->cond_snapshot)
		ret = -EINVAL;
	else {
		kfree(tr->cond_snapshot);
		tr->cond_snapshot = NULL;
	}

	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
void *tracing_cond_snapshot_data(struct trace_array *tr)
{
	return NULL;
}
EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
{
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
int tracing_snapshot_cond_disable(struct trace_array *tr)
{
	return false;
}
EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
#define free_snapshot(tr)	do { } while (0)
#endif /* CONFIG_TRACER_SNAPSHOT */
void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning) {
		trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
			"Disabling tracing due to warning\n");
		tracing_off();
	}
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
bool tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->array_buffer.buffer)
		return ring_buffer_record_is_on(tr->array_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
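
/*
 * Typical debugging pattern (a sketch): stop the ring buffer right after
 * the event of interest so it is not overwritten, then inspect the trace:
 *
 *	if (something_went_wrong)
 *		tracing_off();
 *
 *	// later: cat /sys/kernel/tracing/trace, then tracing_on() to resume
 */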
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/*
	 * nr_entries can not be zero and the startup
	 * tests require some buffer space. Therefore
	 * ensure we have at least 4096 bytes of buffer.
	 */
	trace_buf_size = max(4096UL, buf_size);
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the evals (enum) were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	{ ktime_get_boot_fast_ns,	"boot",		1 },
	{ ktime_get_tai_fast_ns,	"tai",		1 },
	ARCH_TRACE_CLOCKS
};

bool trace_clock_in_ns(struct trace_array *tr)
{
	if (trace_clocks[tr->clock_id].in_ns)
		return true;

	return false;
}
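
/*
 * The clock is selected per trace array via tracefs (a sketch; the names
 * come from the table above):
 *
 *	cat /sys/kernel/tracing/trace_clock	# current one is in [brackets]
 *	echo mono > /sys/kernel/tracing/trace_clock
 */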
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
	parser->buffer = NULL;
}
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		parser->idx = 0;

		/* only spaces were written */
		if (isspace(ch) || !ch) {
			*ppos += read;
			ret = read;
			goto out;
		}
	}

	/* read the non-space input */
	while (cnt && !isspace(ch) && ch) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch) || !ch) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
		/* Make sure the parsed string always terminates with '\0'. */
		parser->buffer[parser->idx] = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops;
#ifdef LATENCY_FS_NOTIFY

static struct workqueue_struct *fsnotify_wq;

static void latency_fsnotify_workfn(struct work_struct *work)
{
	struct trace_array *tr = container_of(work, struct trace_array,
					      fsnotify_work);
	fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
}

static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
{
	struct trace_array *tr = container_of(iwork, struct trace_array,
					      fsnotify_irqwork);
	queue_work(fsnotify_wq, &tr->fsnotify_work);
}

static void trace_create_maxlat_file(struct trace_array *tr,
				     struct dentry *d_tracer)
{
	INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
	init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
	tr->d_max_latency = trace_create_file("tracing_max_latency",
					      TRACE_MODE_WRITE,
					      d_tracer, tr,
					      &tracing_max_lat_fops);
}

__init static int latency_fsnotify_init(void)
{
	fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
				      WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!fsnotify_wq) {
		pr_err("Unable to allocate tr_max_lat_wq\n");
		return -ENOMEM;
	}
	return 0;
}

late_initcall_sync(latency_fsnotify_init);

void latency_fsnotify(struct trace_array *tr)
{
	if (!fsnotify_wq)
		return;
	/*
	 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
	 * possible that we are called from __schedule() or do_idle(), which
	 * could cause a deadlock.
	 */
	irq_work_queue(&tr->fsnotify_irqwork);
}

#else /* !LATENCY_FS_NOTIFY */

#define trace_create_maxlat_file(tr, d_tracer)				\
	trace_create_file("tracing_max_latency", TRACE_MODE_WRITE,	\
			  d_tracer, tr, &tracing_max_lat_fops)

#endif
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct array_buffer *trace_buf = &tr->array_buffer;
	struct array_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
	latency_fsnotify(tr);
}
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from array_buffer */
	if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

#ifdef CONFIG_TRACER_SNAPSHOT
	if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
		arch_spin_unlock(&tr->max_lock);
		return;
	}
#endif
	swap(tr->array_buffer.buffer, tr->max_buffer.buffer);

	__update_max_tr(tr, tsk, cpu);

	arch_spin_unlock(&tr->max_lock);

	/* Any waiters on the old snapshot buffer need to wake up */
	ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS);
}
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 * Another reason is resize is in progress.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit or resize in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

#endif /* CONFIG_TRACER_MAX_TRACE */
static int wait_on_pipe(struct trace_iterator *iter, int full)
{
	int ret;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full);

#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * Make sure this is still the snapshot buffer, as if a snapshot were
	 * to happen, this would now be the main buffer.
	 */
	if (iter->snapshot)
		iter->array_buffer = &iter->tr->max_buffer;
#endif

	return ret;
}
#ifdef CONFIG_FTRACE_STARTUP_TEST
static bool selftests_can_run;

struct trace_selftests {
	struct list_head		list;
	struct tracer			*type;
};

static LIST_HEAD(postponed_selftests);

static int save_selftest(struct tracer *type)
{
	struct trace_selftests *selftest;

	selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
	if (!selftest)
		return -ENOMEM;

	selftest->type = type;
	list_add(&selftest->list, &postponed_selftests);

	return 0;
}
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * If a tracer registers early in boot up (before scheduling is
	 * initialized and such), then do not run its selftests yet.
	 * Instead, run it a little later in the boot process.
	 */
	if (!selftests_can_run)
		return save_selftest(type);

	if (!tracing_is_on()) {
		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
			type->name);
		return 0;
	}

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (tr->ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->array_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (tr->ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
static int do_run_tracer_selftest(struct tracer *type)
{
	int ret;

	/*
	 * Tests can take a long time, especially if they are run one after the
	 * other, as does happen during bootup when all the tracers are
	 * registered. This could cause the soft lockup watchdog to trigger.
	 */
	cond_resched();

	tracing_selftest_running = true;
	ret = run_tracer_selftest(type);
	tracing_selftest_running = false;

	return ret;
}
static __init int init_trace_selftests(void)
{
	struct trace_selftests *p, *n;
	struct tracer *t, **last;
	int ret;

	selftests_can_run = true;

	mutex_lock(&trace_types_lock);

	if (list_empty(&postponed_selftests))
		goto out;

	pr_info("Running postponed tracer tests:\n");

	tracing_selftest_running = true;
	list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
		cond_resched();
		ret = run_tracer_selftest(p->type);
		/* If the test fails, then warn and remove from available_tracers */
		if (ret < 0) {
			WARN(1, "tracer: %s failed selftest, disabling\n",
			     p->type->name);
			last = &trace_types;
			for (t = trace_types; t; t = t->next) {
				if (t == p->type) {
					*last = t->next;
					break;
				}
				last = &t->next;
			}
		}
		list_del(&p->list);
		kfree(p);
	}
	tracing_selftest_running = false;

 out:
	mutex_unlock(&trace_types_lock);

	return 0;
}
core_initcall(init_trace_selftests);
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Can not register tracer %s due to lockdown\n",
			type->name);
		return -EPERM;
	}

	mutex_lock(&trace_types_lock);

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags) {
		/* allocate a dummy tracer_flags */
		type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
		if (!type->flags) {
			ret = -ENOMEM;
			goto out;
		}
		type->flags->val = 0;
		type->flags->opts = dummy_tracer_opt;
	} else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	/* store the tracer for __set_tracer_option */
	type->flags->trace = type;

	ret = do_run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	disable_tracing_selftest("running a tracer");

 out_unlock:
	return ret;
}
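
/*
 * Registration sketch (illustrative; the tracer name and callbacks are
 * hypothetical and the field set is kept minimal): a tracer declares a
 * struct tracer and registers it from an __init function:
 *
 *	static struct tracer foo_tracer __read_mostly = {
 *		.name	= "foo",
 *		.init	= foo_trace_init,
 *		.reset	= foo_trace_reset,
 *	};
 *
 *	static __init int init_foo_tracer(void)
 *	{
 *		return register_tracer(&foo_tracer);
 *	}
 *	core_initcall(init_foo_tracer);
 */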
static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct array_buffer *buf)
{
	struct trace_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_rcu();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	ring_buffer_reset_online_cpus(buffer);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus_unlocked(void)
{
	struct trace_array *tr;

	lockdep_assert_held(&trace_types_lock);

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->clear_trace)
			continue;
		tr->clear_trace = false;
		tracing_reset_online_cpus(&tr->array_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

void tracing_reset_all_online_cpus(void)
{
	mutex_lock(&trace_types_lock);
	tracing_reset_all_online_cpus_unlocked();
	mutex_unlock(&trace_types_lock);
}
/*
 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
 * is the tgid last observed corresponding to pid=i.
 */
static int *tgid_map;

/* The maximum valid index into tgid_map. */
static size_t tgid_map_max;

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
/*
 * Preemption must be disabled before acquiring trace_cmdline_lock.
 * The various trace_arrays' max_lock must be acquired in a context
 * where interrupt is disabled.
 */
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc_array(val,
					      sizeof(*s->map_cmdline_to_pid),
					      GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
static void tracing_start_tr(struct trace_array *tr)
{
	struct trace_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (--tr->stop_count) {
		if (WARN_ON_ONCE(tr->stop_count < 0)) {
			/* Someone screwed up their debugging */
			tr->stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&tr->max_lock);

	buffer = tr->array_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = tr->max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&tr->max_lock);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	return tracing_start_tr(&global_trace);
}
2425 static void tracing_stop_tr(struct trace_array *tr)
2427 struct trace_buffer *buffer;
2428 unsigned long flags;
2430 raw_spin_lock_irqsave(&tr->start_lock, flags);
2431 if (tr->stop_count++)
2434 /* Prevent the buffers from switching */
2435 arch_spin_lock(&tr->max_lock);
2437 buffer = tr->array_buffer.buffer;
2439 ring_buffer_record_disable(buffer);
2441 #ifdef CONFIG_TRACER_MAX_TRACE
2442 buffer = tr->max_buffer.buffer;
2444 ring_buffer_record_disable(buffer);
2447 arch_spin_unlock(&tr->max_lock);
2450 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2454 * tracing_stop - quick stop of the tracer
2456 * Light weight way to stop tracing. Use in conjunction with
2459 void tracing_stop(void)
2461 return tracing_stop_tr(&global_trace);
2464 static int trace_save_cmdline(struct task_struct *tsk)
2468 /* treat recording of idle task as a success */
2472 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2475 * It's not the end of the world if we don't get
2476 * the lock, but we also don't want to spin
2477 * nor do we want to disable interrupts,
2478 * so if we miss here, then better luck next time.
* This is called within the scheduler and wake up, so interrupts
* had better be disabled and the run queue lock held.
2483 lockdep_assert_preemption_disabled();
if (!arch_spin_trylock(&trace_cmdline_lock))
	return 0;
2487 idx = savedcmd->map_pid_to_cmdline[tpid];
2488 if (idx == NO_CMDLINE_MAP) {
2489 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2491 savedcmd->map_pid_to_cmdline[tpid] = idx;
2492 savedcmd->cmdline_idx = idx;
2495 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2496 set_cmdline(idx, tsk->comm);
2498 arch_spin_unlock(&trace_cmdline_lock);
2503 static void __trace_find_cmdline(int pid, char comm[])
2509 strcpy(comm, "<idle>");
2513 if (WARN_ON_ONCE(pid < 0)) {
2514 strcpy(comm, "<XXX>");
2518 tpid = pid & (PID_MAX_DEFAULT - 1);
2519 map = savedcmd->map_pid_to_cmdline[tpid];
2520 if (map != NO_CMDLINE_MAP) {
2521 tpid = savedcmd->map_cmdline_to_pid[map];
2523 strscpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2527 strcpy(comm, "<...>");
2530 void trace_find_cmdline(int pid, char comm[])
2533 arch_spin_lock(&trace_cmdline_lock);
2535 __trace_find_cmdline(pid, comm);
2537 arch_spin_unlock(&trace_cmdline_lock);
2541 static int *trace_find_tgid_ptr(int pid)
2544 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
* if we observe a non-NULL tgid_map then we also observe the correct
* tgid_map_max.
2548 int *map = smp_load_acquire(&tgid_map);
2550 if (unlikely(!map || pid > tgid_map_max))
2556 int trace_find_tgid(int pid)
2558 int *ptr = trace_find_tgid_ptr(pid);
2560 return ptr ? *ptr : 0;
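/*
 * Illustrative sketch (not part of the original source): typical use
 * of trace_find_tgid() when formatting output. A return of 0 means
 * "unknown", which callers render as a placeholder rather than as a
 * real tgid. The helper name is hypothetical.
 */
static __maybe_unused void example_seq_print_tgid(struct trace_seq *s, int pid)
{
	int tgid = trace_find_tgid(pid);

	if (tgid)
		trace_seq_printf(s, "%7d", tgid);
	else
		trace_seq_puts(s, "  -----");
}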
2563 static int trace_save_tgid(struct task_struct *tsk)
2567 /* treat recording of idle task as a success */
2571 ptr = trace_find_tgid_ptr(tsk->pid);
2579 static bool tracing_record_taskinfo_skip(int flags)
2581 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2583 if (!__this_cpu_read(trace_taskinfo_save))
2589 * tracing_record_taskinfo - record the task info of a task
2591 * @task: task to record
2592 * @flags: TRACE_RECORD_CMDLINE for recording comm
2593 * TRACE_RECORD_TGID for recording tgid
2595 void tracing_record_taskinfo(struct task_struct *task, int flags)
2599 if (tracing_record_taskinfo_skip(flags))
2603 * Record as much task information as possible. If some fail, continue
2604 * to try to record the others.
2606 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2607 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
/* If recording any information failed, retry soon. */
2613 __this_cpu_write(trace_taskinfo_save, false);
2617 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2619 * @prev: previous task during sched_switch
2620 * @next: next task during sched_switch
2621 * @flags: TRACE_RECORD_CMDLINE for recording comm
2622 * TRACE_RECORD_TGID for recording tgid
2624 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2625 struct task_struct *next, int flags)
2629 if (tracing_record_taskinfo_skip(flags))
2633 * Record as much task information as possible. If some fail, continue
2634 * to try to record the others.
2636 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2637 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2638 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2639 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
/* If recording any information failed, retry soon. */
2645 __this_cpu_write(trace_taskinfo_save, false);
2648 /* Helpers to record a specific task information */
2649 void tracing_record_cmdline(struct task_struct *task)
2651 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2654 void tracing_record_tgid(struct task_struct *task)
2656 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2660 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2661 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2662 * simplifies those functions and keeps them in sync.
2664 enum print_line_t trace_handle_return(struct trace_seq *s)
2666 return trace_seq_has_overflowed(s) ?
2667 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2669 EXPORT_SYMBOL_GPL(trace_handle_return);
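/*
 * Illustrative sketch (not part of the original source): the calling
 * pattern trace_handle_return() is meant for in an event's output
 * callback. The function name and format are hypothetical.
 */
static __maybe_unused enum print_line_t
example_event_trace_output(struct trace_iterator *iter)
{
	trace_seq_printf(&iter->seq, "example: cpu=%d ts=%llu\n",
			 iter->cpu, iter->ts);
	/* Collapse the overflow check into the required return value */
	return trace_handle_return(&iter->seq);
}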
2671 static unsigned short migration_disable_value(void)
2673 #if defined(CONFIG_SMP)
2674 return current->migration_disabled;
2680 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2682 unsigned int trace_flags = irqs_status;
2685 pc = preempt_count();
2688 trace_flags |= TRACE_FLAG_NMI;
2689 if (pc & HARDIRQ_MASK)
2690 trace_flags |= TRACE_FLAG_HARDIRQ;
2691 if (in_serving_softirq())
2692 trace_flags |= TRACE_FLAG_SOFTIRQ;
2693 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2694 trace_flags |= TRACE_FLAG_BH_OFF;
2696 if (tif_need_resched())
2697 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2698 if (test_preempt_need_resched())
2699 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2700 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2701 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
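/*
 * Illustrative sketch (not part of the original source): the word
 * built above carries the preempt depth in bits 0-3, the
 * migrate-disable depth in bits 4-7 and the TRACE_FLAG_* bits from
 * bit 16 up. The helper below (hypothetical) just inverts that
 * layout.
 */
static __maybe_unused void example_unpack_trace_ctx(unsigned int trace_ctx,
						    unsigned int *preempt_depth,
						    unsigned int *migrate_disable,
						    unsigned int *flags)
{
	*preempt_depth	 = trace_ctx & 0xf;
	*migrate_disable = (trace_ctx >> 4) & 0xf;
	*flags		 = trace_ctx >> 16;
}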
2704 struct ring_buffer_event *
2705 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2708 unsigned int trace_ctx)
2710 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
2713 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2714 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2715 static int trace_buffered_event_ref;
2718 * trace_buffered_event_enable - enable buffering events
* When events are being filtered, it is quicker to use a temporary
* buffer to write the event data into if there's a likely chance
* that it will not be committed. The discard of the ring buffer
* is not as fast as committing, and is much slower than copying
* to the temp buffer.
*
* When an event is to be filtered, allocate per cpu buffers to
* write the event data into, and if the event is filtered and discarded
* it is simply dropped, otherwise, the entire data is to be committed
* in one shot.
2731 void trace_buffered_event_enable(void)
2733 struct ring_buffer_event *event;
2737 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2739 if (trace_buffered_event_ref++)
2742 for_each_tracing_cpu(cpu) {
2743 page = alloc_pages_node(cpu_to_node(cpu),
2744 GFP_KERNEL | __GFP_NORETRY, 0);
2745 /* This is just an optimization and can handle failures */
2747 pr_err("Failed to allocate event buffer\n");
2751 event = page_address(page);
2752 memset(event, 0, sizeof(*event));
2754 per_cpu(trace_buffered_event, cpu) = event;
2757 if (cpu == smp_processor_id() &&
2758 __this_cpu_read(trace_buffered_event) !=
per_cpu(trace_buffered_event, cpu))
	WARN_ON_ONCE(1);
2765 static void enable_trace_buffered_event(void *data)
2767 /* Probably not needed, but do it anyway */
2769 this_cpu_dec(trace_buffered_event_cnt);
2772 static void disable_trace_buffered_event(void *data)
2774 this_cpu_inc(trace_buffered_event_cnt);
2778 * trace_buffered_event_disable - disable buffering events
2780 * When a filter is removed, it is faster to not use the buffered
2781 * events, and to commit directly into the ring buffer. Free up
2782 * the temp buffers when there are no more users. This requires
2783 * special synchronization with current events.
2785 void trace_buffered_event_disable(void)
2789 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2791 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2794 if (--trace_buffered_event_ref)
2797 /* For each CPU, set the buffer as used. */
on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
		 NULL, true);
2801 /* Wait for all current users to finish */
2804 for_each_tracing_cpu(cpu) {
2805 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2806 per_cpu(trace_buffered_event, cpu) = NULL;
* Wait for all CPUs that potentially started checking if they can use
* their event buffer only after the previous synchronize_rcu() call and
* that still read a valid pointer from trace_buffered_event. Ensure
* they don't see a cleared trace_buffered_event_cnt, else they could
* wrongly decide to use the pointed-to buffer which is now freed.
2818 /* For each CPU, relinquish the buffer */
on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
		 true);
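/*
 * Illustrative sketch (not part of the original source): both functions
 * above are refcounted and assert event_mutex, so a filter setup or
 * teardown path brackets them like this. The function name is
 * hypothetical.
 */
static __maybe_unused void example_filter_toggle_buffering(bool enable)
{
	mutex_lock(&event_mutex);
	if (enable)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}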
2823 static struct trace_buffer *temp_buffer;
2825 struct ring_buffer_event *
2826 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2827 struct trace_event_file *trace_file,
2828 int type, unsigned long len,
2829 unsigned int trace_ctx)
2831 struct ring_buffer_event *entry;
2832 struct trace_array *tr = trace_file->tr;
2835 *current_rb = tr->array_buffer.buffer;
2837 if (!tr->no_filter_buffering_ref &&
2838 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2839 preempt_disable_notrace();
2841 * Filtering is on, so try to use the per cpu buffer first.
2842 * This buffer will simulate a ring_buffer_event,
2843 * where the type_len is zero and the array[0] will
2844 * hold the full length.
* (see include/linux/ring_buffer.h for details on
2846 * how the ring_buffer_event is structured).
2848 * Using a temp buffer during filtering and copying it
2849 * on a matched filter is quicker than writing directly
2850 * into the ring buffer and then discarding it when
2851 * it doesn't match. That is because the discard
2852 * requires several atomic operations to get right.
2853 * Copying on match and doing nothing on a failed match
2854 * is still quicker than no copy on match, but having
2855 * to discard out of the ring buffer on a failed match.
2857 if ((entry = __this_cpu_read(trace_buffered_event))) {
2858 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2860 val = this_cpu_inc_return(trace_buffered_event_cnt);
2863 * Preemption is disabled, but interrupts and NMIs
2864 * can still come in now. If that happens after
2865 * the above increment, then it will have to go
2866 * back to the old method of allocating the event
2867 * on the ring buffer, and if the filter fails, it
2868 * will have to call ring_buffer_discard_commit()
2871 * Need to also check the unlikely case that the
2872 * length is bigger than the temp buffer size.
2873 * If that happens, then the reserve is pretty much
2874 * guaranteed to fail, as the ring buffer currently
2875 * only allows events less than a page. But that may
2876 * change in the future, so let the ring buffer reserve
2877 * handle the failure in that case.
2879 if (val == 1 && likely(len <= max_len)) {
2880 trace_event_setup(entry, type, trace_ctx);
2881 entry->array[0] = len;
/* Return with preemption disabled */
return entry;
2885 this_cpu_dec(trace_buffered_event_cnt);
2887 /* __trace_buffer_lock_reserve() disables preemption */
2888 preempt_enable_notrace();
2891 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2894 * If tracing is off, but we have triggers enabled
2895 * we still need to look at the event data. Use the temp_buffer
2896 * to store the trace event for the trigger to use. It's recursive
2897 * safe and will not be recorded anywhere.
2899 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2900 *current_rb = temp_buffer;
2901 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2906 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
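/*
 * Illustrative sketch (not part of the original source): the minimal
 * reserve/fill/commit round trip the helpers above are built around,
 * mirroring what trace_function() below does. The function name is
 * hypothetical.
 */
static __maybe_unused void example_emit_fn_event(struct trace_array *tr,
						 unsigned int trace_ctx)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    trace_ctx);
	if (!event)
		return;		/* ring buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;
	entry->parent_ip = 0;
	__buffer_unlock_commit(buffer, event);
}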
2908 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2909 static DEFINE_MUTEX(tracepoint_printk_mutex);
2911 static void output_printk(struct trace_event_buffer *fbuffer)
2913 struct trace_event_call *event_call;
2914 struct trace_event_file *file;
2915 struct trace_event *event;
2916 unsigned long flags;
2917 struct trace_iterator *iter = tracepoint_print_iter;
2919 /* We should never get here if iter is NULL */
2920 if (WARN_ON_ONCE(!iter))
2923 event_call = fbuffer->trace_file->event_call;
2924 if (!event_call || !event_call->event.funcs ||
2925 !event_call->event.funcs->trace)
2928 file = fbuffer->trace_file;
2929 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2930 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2931 !filter_match_preds(file->filter, fbuffer->entry)))
2934 event = &fbuffer->trace_file->event_call->event;
2936 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2937 trace_seq_init(&iter->seq);
2938 iter->ent = fbuffer->entry;
2939 event_call->event.funcs->trace(iter, 0, event);
2940 trace_seq_putc(&iter->seq, 0);
2941 printk("%s", iter->seq.buffer);
2943 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2946 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2947 void *buffer, size_t *lenp,
2950 int save_tracepoint_printk;
2953 mutex_lock(&tracepoint_printk_mutex);
2954 save_tracepoint_printk = tracepoint_printk;
2956 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2959 * This will force exiting early, as tracepoint_printk
* is always zero when tracepoint_print_iter is not allocated
2962 if (!tracepoint_print_iter)
2963 tracepoint_printk = 0;
2965 if (save_tracepoint_printk == tracepoint_printk)
2968 if (tracepoint_printk)
2969 static_key_enable(&tracepoint_printk_key.key);
2971 static_key_disable(&tracepoint_printk_key.key);
2974 mutex_unlock(&tracepoint_printk_mutex);
2979 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2981 enum event_trigger_type tt = ETT_NONE;
2982 struct trace_event_file *file = fbuffer->trace_file;
2984 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2985 fbuffer->entry, &tt))
2988 if (static_key_false(&tracepoint_printk_key.key))
2989 output_printk(fbuffer);
2991 if (static_branch_unlikely(&trace_event_exports_enabled))
2992 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2994 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2995 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2999 event_triggers_post_call(file, tt);
3002 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
* Skip 3:
*
*  trace_buffer_unlock_commit_regs()
3008 * trace_event_buffer_commit()
3009 * trace_event_raw_event_xxx()
3011 # define STACK_SKIP 3
3013 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
3014 struct trace_buffer *buffer,
3015 struct ring_buffer_event *event,
3016 unsigned int trace_ctx,
3017 struct pt_regs *regs)
3019 __buffer_unlock_commit(buffer, event);
3022 * If regs is not set, then skip the necessary functions.
3023 * Note, we can still get here via blktrace, wakeup tracer
3024 * and mmiotrace, but that's ok if they lose a function or
3025 * two. They are not that meaningful.
3027 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
3028 ftrace_trace_userstack(tr, buffer, trace_ctx);
3032 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
3035 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
3036 struct ring_buffer_event *event)
3038 __buffer_unlock_commit(buffer, event);
3042 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3043 parent_ip, unsigned int trace_ctx)
3045 struct trace_event_call *call = &event_function;
3046 struct trace_buffer *buffer = tr->array_buffer.buffer;
3047 struct ring_buffer_event *event;
3048 struct ftrace_entry *entry;
3050 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3054 entry = ring_buffer_event_data(event);
3056 entry->parent_ip = parent_ip;
3058 if (!call_filter_check_discard(call, entry, buffer, event)) {
3059 if (static_branch_unlikely(&trace_function_exports_enabled))
3060 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3061 __buffer_unlock_commit(buffer, event);
3065 #ifdef CONFIG_STACKTRACE
3067 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3068 #define FTRACE_KSTACK_NESTING 4
3070 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3072 struct ftrace_stack {
3073 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3077 struct ftrace_stacks {
3078 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3081 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3082 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
3084 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3085 unsigned int trace_ctx,
3086 int skip, struct pt_regs *regs)
3088 struct trace_event_call *call = &event_kernel_stack;
3089 struct ring_buffer_event *event;
3090 unsigned int size, nr_entries;
3091 struct ftrace_stack *fstack;
3092 struct stack_entry *entry;
* Add one, for this function and the call to save_stack_trace().
3097 * If regs is set, then these functions will not be in the way.
3099 #ifndef CONFIG_UNWINDER_ORC
3104 preempt_disable_notrace();
3106 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3108 /* This should never happen. If it does, yell once and skip */
3109 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3113 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3114 * interrupt will either see the value pre increment or post
3115 * increment. If the interrupt happens pre increment it will have
3116 * restored the counter when it returns. We just need a barrier to
3117 * keep gcc from moving things around.
3121 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3122 size = ARRAY_SIZE(fstack->calls);
3125 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3128 nr_entries = stack_trace_save(fstack->calls, size, skip);
3131 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3132 struct_size(entry, caller, nr_entries),
3136 entry = ring_buffer_event_data(event);
3138 entry->size = nr_entries;
3139 memcpy(&entry->caller, fstack->calls,
3140 flex_array_size(entry, caller, nr_entries));
3142 if (!call_filter_check_discard(call, entry, buffer, event))
3143 __buffer_unlock_commit(buffer, event);
3146 /* Again, don't let gcc optimize things here */
3148 __this_cpu_dec(ftrace_stack_reserve);
3149 preempt_enable_notrace();
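/*
 * Illustrative sketch (not part of the original source): the per-CPU
 * reservation above is a tiny stack of FTRACE_KSTACK_NESTING slots, so
 * task, softirq, hardirq and NMI context can each unwind concurrently
 * on one CPU without sharing a struct ftrace_stack. On success the
 * caller owns the slot and must __this_cpu_dec() the reserve counter
 * and re-enable preemption when done. The helper name is hypothetical.
 */
static __maybe_unused struct ftrace_stack *example_claim_stack_slot(void)
{
	int stackidx;

	preempt_disable_notrace();
	stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
	if (stackidx >= FTRACE_KSTACK_NESTING) {
		__this_cpu_dec(ftrace_stack_reserve);
		preempt_enable_notrace();
		return NULL;
	}
	return this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
}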
3153 static inline void ftrace_trace_stack(struct trace_array *tr,
3154 struct trace_buffer *buffer,
3155 unsigned int trace_ctx,
3156 int skip, struct pt_regs *regs)
3158 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3161 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3164 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3167 struct trace_buffer *buffer = tr->array_buffer.buffer;
3169 if (rcu_is_watching()) {
3170 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3174 if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY)))
3178 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3179 * but if the above rcu_is_watching() failed, then the NMI
3180 * triggered someplace critical, and ct_irq_enter() should
3181 * not be called from NMI.
3183 if (unlikely(in_nmi()))
3186 ct_irq_enter_irqson();
3187 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3188 ct_irq_exit_irqson();
3192 * trace_dump_stack - record a stack back trace in the trace buffer
3193 * @skip: Number of functions to skip (helper handlers)
3195 void trace_dump_stack(int skip)
3197 if (tracing_disabled || tracing_selftest_running)
3200 #ifndef CONFIG_UNWINDER_ORC
3201 /* Skip 1 to skip this function. */
3204 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3205 tracing_gen_ctx(), skip, NULL);
3207 EXPORT_SYMBOL_GPL(trace_dump_stack);
3209 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3210 static DEFINE_PER_CPU(int, user_stack_count);
3213 ftrace_trace_userstack(struct trace_array *tr,
3214 struct trace_buffer *buffer, unsigned int trace_ctx)
3216 struct trace_event_call *call = &event_user_stack;
3217 struct ring_buffer_event *event;
3218 struct userstack_entry *entry;
3220 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3224 * NMIs can not handle page faults, even with fix ups.
* Saving the user stack can (and often does) fault.
3227 if (unlikely(in_nmi()))
3231 * prevent recursion, since the user stack tracing may
3232 * trigger other kernel events.
3235 if (__this_cpu_read(user_stack_count))
3238 __this_cpu_inc(user_stack_count);
3240 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3241 sizeof(*entry), trace_ctx);
3243 goto out_drop_count;
3244 entry = ring_buffer_event_data(event);
3246 entry->tgid = current->tgid;
3247 memset(&entry->caller, 0, sizeof(entry->caller));
3249 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3250 if (!call_filter_check_discard(call, entry, buffer, event))
3251 __buffer_unlock_commit(buffer, event);
3254 __this_cpu_dec(user_stack_count);
3258 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3259 static void ftrace_trace_userstack(struct trace_array *tr,
3260 struct trace_buffer *buffer,
3261 unsigned int trace_ctx)
3264 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3266 #endif /* CONFIG_STACKTRACE */
3269 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3270 unsigned long long delta)
3272 entry->bottom_delta_ts = delta & U32_MAX;
3273 entry->top_delta_ts = (delta >> 32);
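/*
 * Illustrative sketch (not part of the original source): reversing the
 * 32/32 split performed above when the entry is read back out. The
 * helper name is hypothetical.
 */
static __maybe_unused u64
example_read_delta_ts(const struct func_repeats_entry *entry)
{
	return ((u64)entry->top_delta_ts << 32) | entry->bottom_delta_ts;
}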
3276 void trace_last_func_repeats(struct trace_array *tr,
3277 struct trace_func_repeats *last_info,
3278 unsigned int trace_ctx)
3280 struct trace_buffer *buffer = tr->array_buffer.buffer;
3281 struct func_repeats_entry *entry;
3282 struct ring_buffer_event *event;
3285 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3286 sizeof(*entry), trace_ctx);
3290 delta = ring_buffer_event_time_stamp(buffer, event) -
3291 last_info->ts_last_call;
3293 entry = ring_buffer_event_data(event);
3294 entry->ip = last_info->ip;
3295 entry->parent_ip = last_info->parent_ip;
3296 entry->count = last_info->count;
3297 func_repeats_set_delta_ts(entry, delta);
3299 __buffer_unlock_commit(buffer, event);
3302 /* created for use with alloc_percpu */
3303 struct trace_buffer_struct {
3305 char buffer[4][TRACE_BUF_SIZE];
3308 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
3311 * This allows for lockless recording. If we're nested too deeply, then
3312 * this returns NULL.
3314 static char *get_trace_buf(void)
3316 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3318 if (!trace_percpu_buffer || buffer->nesting >= 4)
3323 /* Interrupts must see nesting incremented before we use the buffer */
3325 return &buffer->buffer[buffer->nesting - 1][0];
3328 static void put_trace_buf(void)
3330 /* Don't let the decrement of nesting leak before this */
3332 this_cpu_dec(trace_percpu_buffer->nesting);
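/*
 * Illustrative sketch (not part of the original source): the intended
 * get/put bracketing around a use of the per-CPU scratch buffers.
 * Preemption must already be disabled so the nesting counter and the
 * buffer stay on one CPU. The helper name is hypothetical.
 */
static __maybe_unused int example_use_trace_buf(const char *src)
{
	char *tbuffer = get_trace_buf();
	int len;

	if (!tbuffer)
		return -1;	/* nested too deeply, or not yet allocated */
	len = strscpy(tbuffer, src, TRACE_BUF_SIZE);
	put_trace_buf();
	return len;
}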
3335 static int alloc_percpu_trace_buffer(void)
3337 struct trace_buffer_struct __percpu *buffers;
3339 if (trace_percpu_buffer)
3342 buffers = alloc_percpu(struct trace_buffer_struct);
3343 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3346 trace_percpu_buffer = buffers;
3350 static int buffers_allocated;
3352 void trace_printk_init_buffers(void)
3354 if (buffers_allocated)
3357 if (alloc_percpu_trace_buffer())
3360 /* trace_printk() is for debug use only. Don't use it in production. */
3363 pr_warn("**********************************************************\n");
3364 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3366 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3368 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3369 pr_warn("** unsafe for production use. **\n");
3371 pr_warn("** If you see this message and you are not debugging **\n");
3372 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3374 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3375 pr_warn("**********************************************************\n");
3377 /* Expand the buffers to set size */
3378 tracing_update_buffers(&global_trace);
3380 buffers_allocated = 1;
3383 * trace_printk_init_buffers() can be called by modules.
3384 * If that happens, then we need to start cmdline recording
3385 * directly here. If the global_trace.buffer is already
3386 * allocated here, then this was called by module code.
3388 if (global_trace.array_buffer.buffer)
3389 tracing_start_cmdline_record();
3391 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3393 void trace_printk_start_comm(void)
3395 /* Start tracing comms if trace printk is set */
3396 if (!buffers_allocated)
3398 tracing_start_cmdline_record();
3401 static void trace_printk_start_stop_comm(int enabled)
3403 if (!buffers_allocated)
3407 tracing_start_cmdline_record();
3409 tracing_stop_cmdline_record();
3413 * trace_vbprintk - write binary msg to tracing buffer
3414 * @ip: The address of the caller
3415 * @fmt: The string format to write to the buffer
3416 * @args: Arguments for @fmt
3418 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3420 struct trace_event_call *call = &event_bprint;
3421 struct ring_buffer_event *event;
3422 struct trace_buffer *buffer;
3423 struct trace_array *tr = &global_trace;
3424 struct bprint_entry *entry;
3425 unsigned int trace_ctx;
3429 if (unlikely(tracing_selftest_running || tracing_disabled))
3432 /* Don't pollute graph traces with trace_vprintk internals */
3433 pause_graph_tracing();
3435 trace_ctx = tracing_gen_ctx();
3436 preempt_disable_notrace();
3438 tbuffer = get_trace_buf();
3444 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3446 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3449 size = sizeof(*entry) + sizeof(u32) * len;
3450 buffer = tr->array_buffer.buffer;
3451 ring_buffer_nest_start(buffer);
3452 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3456 entry = ring_buffer_event_data(event);
3460 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3461 if (!call_filter_check_discard(call, entry, buffer, event)) {
3462 __buffer_unlock_commit(buffer, event);
3463 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3467 ring_buffer_nest_end(buffer);
3472 preempt_enable_notrace();
3473 unpause_graph_tracing();
3477 EXPORT_SYMBOL_GPL(trace_vbprintk);
3481 __trace_array_vprintk(struct trace_buffer *buffer,
3482 unsigned long ip, const char *fmt, va_list args)
3484 struct trace_event_call *call = &event_print;
3485 struct ring_buffer_event *event;
3487 struct print_entry *entry;
3488 unsigned int trace_ctx;
3491 if (tracing_disabled)
3494 /* Don't pollute graph traces with trace_vprintk internals */
3495 pause_graph_tracing();
3497 trace_ctx = tracing_gen_ctx();
3498 preempt_disable_notrace();
3501 tbuffer = get_trace_buf();
3507 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3509 size = sizeof(*entry) + len + 1;
3510 ring_buffer_nest_start(buffer);
3511 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3515 entry = ring_buffer_event_data(event);
3518 memcpy(&entry->buf, tbuffer, len + 1);
3519 if (!call_filter_check_discard(call, entry, buffer, event)) {
3520 __buffer_unlock_commit(buffer, event);
3521 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3525 ring_buffer_nest_end(buffer);
3529 preempt_enable_notrace();
3530 unpause_graph_tracing();
3536 int trace_array_vprintk(struct trace_array *tr,
3537 unsigned long ip, const char *fmt, va_list args)
3539 if (tracing_selftest_running && tr == &global_trace)
3542 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3546 * trace_array_printk - Print a message to a specific instance
3547 * @tr: The instance trace_array descriptor
3548 * @ip: The instruction pointer that this is called from.
3549 * @fmt: The format to print (printf format)
3551 * If a subsystem sets up its own instance, they have the right to
3552 * printk strings into their tracing instance buffer using this
3553 * function. Note, this function will not write into the top level
3554 * buffer (use trace_printk() for that), as writing into the top level
3555 * buffer should only have events that can be individually disabled.
* trace_printk() is only used for debugging a kernel, and should never
* be incorporated in normal use.
3559 * trace_array_printk() can be used, as it will not add noise to the
3560 * top level tracing buffer.
3562 * Note, trace_array_init_printk() must be called on @tr before this
3566 int trace_array_printk(struct trace_array *tr,
3567 unsigned long ip, const char *fmt, ...)
3575 /* This is only allowed for created instances */
3576 if (tr == &global_trace)
3579 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3583 ret = trace_array_vprintk(tr, ip, fmt, ap);
3587 EXPORT_SYMBOL_GPL(trace_array_printk);
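/*
 * Illustrative sketch (not part of the original source): how a
 * subsystem would pair trace_array_init_printk() with
 * trace_array_printk() on an instance it created. The function name
 * is hypothetical.
 */
static __maybe_unused void example_instance_printk(struct trace_array *tr)
{
	if (trace_array_init_printk(tr))
		return;		/* percpu buffers could not be allocated */
	trace_array_printk(tr, _THIS_IP_, "instance event: %d\n", 42);
}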
3590 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3591 * @tr: The trace array to initialize the buffers for
3593 * As trace_array_printk() only writes into instances, they are OK to
3594 * have in the kernel (unlike trace_printk()). This needs to be called
3595 * before trace_array_printk() can be used on a trace_array.
3597 int trace_array_init_printk(struct trace_array *tr)
3602 /* This is only allowed for created instances */
3603 if (tr == &global_trace)
3606 return alloc_percpu_trace_buffer();
3608 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3611 int trace_array_printk_buf(struct trace_buffer *buffer,
3612 unsigned long ip, const char *fmt, ...)
3617 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3621 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3627 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3629 return trace_array_vprintk(&global_trace, ip, fmt, args);
3631 EXPORT_SYMBOL_GPL(trace_vprintk);
3633 static void trace_iterator_increment(struct trace_iterator *iter)
3635 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3639 ring_buffer_iter_advance(buf_iter);
3642 static struct trace_entry *
3643 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3644 unsigned long *lost_events)
3646 struct ring_buffer_event *event;
3647 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3650 event = ring_buffer_iter_peek(buf_iter, ts);
3652 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3653 (unsigned long)-1 : 0;
3655 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3660 iter->ent_size = ring_buffer_event_length(event);
3661 return ring_buffer_event_data(event);
3667 static struct trace_entry *
3668 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3669 unsigned long *missing_events, u64 *ent_ts)
3671 struct trace_buffer *buffer = iter->array_buffer->buffer;
3672 struct trace_entry *ent, *next = NULL;
3673 unsigned long lost_events = 0, next_lost = 0;
3674 int cpu_file = iter->cpu_file;
3675 u64 next_ts = 0, ts;
* If we are in a per_cpu trace file, don't bother iterating over
* all CPUs; peek directly.
3684 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3685 if (ring_buffer_empty_cpu(buffer, cpu_file))
3687 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3689 *ent_cpu = cpu_file;
3694 for_each_tracing_cpu(cpu) {
3696 if (ring_buffer_empty_cpu(buffer, cpu))
3699 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3702 * Pick the entry with the smallest timestamp:
3704 if (ent && (!next || ts < next_ts)) {
3708 next_lost = lost_events;
3709 next_size = iter->ent_size;
3713 iter->ent_size = next_size;
3716 *ent_cpu = next_cpu;
3722 *missing_events = next_lost;
3727 #define STATIC_FMT_BUF_SIZE 128
3728 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3730 char *trace_iter_expand_format(struct trace_iterator *iter)
* iter->tr is NULL when used with tp_printk, which means this can
* get called where it is not safe to call krealloc().
3738 if (!iter->tr || iter->fmt == static_fmt_buf)
3741 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3744 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3751 /* Returns true if the string is safe to dereference from an event */
3752 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3755 unsigned long addr = (unsigned long)str;
3756 struct trace_event *trace_event;
3757 struct trace_event_call *event;
3759 /* Ignore strings with no length */
3763 /* OK if part of the event data */
3764 if ((addr >= (unsigned long)iter->ent) &&
3765 (addr < (unsigned long)iter->ent + iter->ent_size))
3768 /* OK if part of the temp seq buffer */
3769 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3770 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3773 /* Core rodata can not be freed */
3774 if (is_kernel_rodata(addr))
3777 if (trace_is_tracepoint_string(str))
3781 * Now this could be a module event, referencing core module
3782 * data, which is OK.
3787 trace_event = ftrace_find_event(iter->ent->type);
3791 event = container_of(trace_event, struct trace_event_call, event);
3792 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3795 /* Would rather have rodata, but this will suffice */
3796 if (within_module_core(addr, event->module))
3802 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3804 static int test_can_verify_check(const char *fmt, ...)
* The verifier depends on vsnprintf() modifying the va_list
* passed to it, where it is sent by reference. Some architectures
* (like x86_32) pass it by value, which means that vsnprintf()
* does not modify the va_list passed to it, and the verifier
* would then need to be able to understand all the values that
* vsnprintf can use. If it is passed by value, then the verifier
* is disabled.
3820 vsnprintf(buf, 16, "%d", ap);
3821 ret = va_arg(ap, int);
3827 static void test_can_verify(void)
3829 if (!test_can_verify_check("%d %d", 0, 1)) {
3830 pr_info("trace event string verifier disabled\n");
3831 static_branch_inc(&trace_no_verify);
3836 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3837 * @iter: The iterator that holds the seq buffer and the event being printed
3838 * @fmt: The format used to print the event
3839 * @ap: The va_list holding the data to print from @fmt.
3841 * This writes the data into the @iter->seq buffer using the data from
* @fmt and @ap. If the format has a %s, then the source of the string
* is examined to make sure it is safe to print, otherwise it will
* warn and print "[UNSAFE-MEMORY]" in place of the dereferenced string.
3847 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3850 const char *p = fmt;
3854 if (WARN_ON_ONCE(!fmt))
3857 if (static_branch_unlikely(&trace_no_verify))
3860 /* Don't bother checking when doing a ftrace_dump() */
3861 if (iter->fmt == static_fmt_buf)
3870 /* We only care about %s and variants */
3871 for (i = 0; p[i]; i++) {
3872 if (i + 1 >= iter->fmt_size) {
3874 * If we can't expand the copy buffer,
3877 if (!trace_iter_expand_format(iter))
3881 if (p[i] == '\\' && p[i+1]) {
3886 /* Need to test cases like %08.*s */
3887 for (j = 1; p[i+j]; j++) {
3888 if (isdigit(p[i+j]) ||
3891 if (p[i+j] == '*') {
3903 /* If no %s found then just print normally */
3907 /* Copy up to the %s, and print that */
3908 strncpy(iter->fmt, p, i);
3909 iter->fmt[i] = '\0';
3910 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3913 * If iter->seq is full, the above call no longer guarantees
3914 * that ap is in sync with fmt processing, and further calls
3915 * to va_arg() can return wrong positional arguments.
3917 * Ensure that ap is no longer used in this case.
3919 if (iter->seq.full) {
3925 len = va_arg(ap, int);
3927 /* The ap now points to the string data of the %s */
3928 str = va_arg(ap, const char *);
3931 * If you hit this warning, it is likely that the
3932 * trace event in question used %s on a string that
3933 * was saved at the time of the event, but may not be
3934 * around when the trace is read. Use __string(),
* __assign_str() and __get_str() helpers in the TRACE_EVENT() macro
* instead. See samples/trace_events/trace-events-sample.h
3939 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3940 "fmt: '%s' current_buffer: '%s'",
3941 fmt, seq_buf_str(&iter->seq.seq))) {
3944 /* Try to safely read the string */
3946 if (len + 1 > iter->fmt_size)
3947 len = iter->fmt_size - 1;
3950 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3954 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3958 trace_seq_printf(&iter->seq, "(0x%px)", str);
3960 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3962 str = "[UNSAFE-MEMORY]";
3963 strcpy(iter->fmt, "%s");
3965 strncpy(iter->fmt, p + i, j + 1);
3966 iter->fmt[j+1] = '\0';
3969 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3971 trace_seq_printf(&iter->seq, iter->fmt, str);
3977 trace_seq_vprintf(&iter->seq, p, ap);
3980 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3982 const char *p, *new_fmt;
3985 if (WARN_ON_ONCE(!fmt))
3988 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3992 new_fmt = q = iter->fmt;
3994 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3995 if (!trace_iter_expand_format(iter))
3998 q += iter->fmt - new_fmt;
3999 new_fmt = iter->fmt;
4004 /* Replace %p with %px */
4008 } else if (p[0] == 'p' && !isalnum(p[1])) {
4019 #define STATIC_TEMP_BUF_SIZE 128
4020 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
4022 /* Find the next real entry, without updating the iterator itself */
4023 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
4024 int *ent_cpu, u64 *ent_ts)
4026 /* __find_next_entry will reset ent_size */
4027 int ent_size = iter->ent_size;
4028 struct trace_entry *entry;
4031 * If called from ftrace_dump(), then the iter->temp buffer
4032 * will be the static_temp_buf and not created from kmalloc.
4033 * If the entry size is greater than the buffer, we can
4034 * not save it. Just return NULL in that case. This is only
4035 * used to add markers when two consecutive events' time
4036 * stamps have a large delta. See trace_print_lat_context()
4038 if (iter->temp == static_temp_buf &&
4039 STATIC_TEMP_BUF_SIZE < ent_size)
4043 * The __find_next_entry() may call peek_next_entry(), which may
4044 * call ring_buffer_peek() that may make the contents of iter->ent
4045 * undefined. Need to copy iter->ent now.
4047 if (iter->ent && iter->ent != iter->temp) {
4048 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4049 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4051 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4056 iter->temp_size = iter->ent_size;
4058 memcpy(iter->temp, iter->ent, iter->ent_size);
4059 iter->ent = iter->temp;
4061 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4062 /* Put back the original ent_size */
4063 iter->ent_size = ent_size;
4068 /* Find the next real entry, and increment the iterator to the next entry */
4069 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4071 iter->ent = __find_next_entry(iter, &iter->cpu,
4072 &iter->lost_events, &iter->ts);
4075 trace_iterator_increment(iter);
4077 return iter->ent ? iter : NULL;
4080 static void trace_consume(struct trace_iterator *iter)
4082 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4083 &iter->lost_events);
4086 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4088 struct trace_iterator *iter = m->private;
4092 WARN_ON_ONCE(iter->leftover);
4096 /* can't go backwards */
4101 ent = trace_find_next_entry_inc(iter);
4105 while (ent && iter->idx < i)
4106 ent = trace_find_next_entry_inc(iter);
4113 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4115 struct ring_buffer_iter *buf_iter;
4116 unsigned long entries = 0;
4119 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4121 buf_iter = trace_buffer_iter(iter, cpu);
4125 ring_buffer_iter_reset(buf_iter);
* With the max latency tracers, a reset may never have taken place
* on a cpu. This is evident from the timestamp being before the
* start of the buffer.
4132 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4133 if (ts >= iter->array_buffer->time_start)
4136 ring_buffer_iter_advance(buf_iter);
4139 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
* The current tracer is copied to avoid a global locking
* all around.
4146 static void *s_start(struct seq_file *m, loff_t *pos)
4148 struct trace_iterator *iter = m->private;
4149 struct trace_array *tr = iter->tr;
4150 int cpu_file = iter->cpu_file;
4155 mutex_lock(&trace_types_lock);
4156 if (unlikely(tr->current_trace != iter->trace)) {
4157 /* Close iter->trace before switching to the new current tracer */
4158 if (iter->trace->close)
4159 iter->trace->close(iter);
4160 iter->trace = tr->current_trace;
4161 /* Reopen the new current tracer */
4162 if (iter->trace->open)
4163 iter->trace->open(iter);
4165 mutex_unlock(&trace_types_lock);
4167 #ifdef CONFIG_TRACER_MAX_TRACE
4168 if (iter->snapshot && iter->trace->use_max_tr)
4169 return ERR_PTR(-EBUSY);
4172 if (*pos != iter->pos) {
4177 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4178 for_each_tracing_cpu(cpu)
4179 tracing_iter_reset(iter, cpu);
4181 tracing_iter_reset(iter, cpu_file);
4184 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4189 * If we overflowed the seq_file before, then we want
4190 * to just reuse the trace_seq buffer again.
4196 p = s_next(m, p, &l);
4200 trace_event_read_lock();
4201 trace_access_lock(cpu_file);
4205 static void s_stop(struct seq_file *m, void *p)
4207 struct trace_iterator *iter = m->private;
4209 #ifdef CONFIG_TRACER_MAX_TRACE
4210 if (iter->snapshot && iter->trace->use_max_tr)
4214 trace_access_unlock(iter->cpu_file);
4215 trace_event_read_unlock();
4219 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4220 unsigned long *entries, int cpu)
4222 unsigned long count;
4224 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4226 * If this buffer has skipped entries, then we hold all
4227 * entries for the trace and we need to ignore the
4228 * ones before the time stamp.
4230 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4231 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4232 /* total is the same as the entries */
4236 ring_buffer_overrun_cpu(buf->buffer, cpu);
4241 get_total_entries(struct array_buffer *buf,
4242 unsigned long *total, unsigned long *entries)
4250 for_each_tracing_cpu(cpu) {
4251 get_total_entries_cpu(buf, &t, &e, cpu);
4257 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4259 unsigned long total, entries;
4264 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4269 unsigned long trace_total_entries(struct trace_array *tr)
4271 unsigned long total, entries;
4276 get_total_entries(&tr->array_buffer, &total, &entries);
4281 static void print_lat_help_header(struct seq_file *m)
4283 seq_puts(m, "# _------=> CPU# \n"
4284 "# / _-----=> irqs-off/BH-disabled\n"
4285 "# | / _----=> need-resched \n"
4286 "# || / _---=> hardirq/softirq \n"
4287 "# ||| / _--=> preempt-depth \n"
4288 "# |||| / _-=> migrate-disable \n"
4289 "# ||||| / delay \n"
4290 "# cmd pid |||||| time | caller \n"
4291 "# \\ / |||||| \\ | / \n");
4294 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4296 unsigned long total;
4297 unsigned long entries;
4299 get_total_entries(buf, &total, &entries);
4300 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4301 entries, total, num_online_cpus());
4305 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4308 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4310 print_event_info(buf, m);
4312 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4313 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4316 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4319 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4320 static const char space[] = " ";
4321 int prec = tgid ? 12 : 2;
4323 print_event_info(buf, m);
4325 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4326 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4327 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4328 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4329 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4330 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4331 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4332 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4336 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4338 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4339 struct array_buffer *buf = iter->array_buffer;
4340 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4341 struct tracer *type = iter->trace;
4342 unsigned long entries;
4343 unsigned long total;
4344 const char *name = type->name;
4346 get_total_entries(buf, &total, &entries);
4348 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4350 seq_puts(m, "# -----------------------------------"
4351 "---------------------------------\n");
4352 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4353 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4354 nsecs_to_usecs(data->saved_latency),
4358 preempt_model_none() ? "server" :
4359 preempt_model_voluntary() ? "desktop" :
4360 preempt_model_full() ? "preempt" :
4361 preempt_model_rt() ? "preempt_rt" :
4363 /* These are reserved for later use */
4366 seq_printf(m, " #P:%d)\n", num_online_cpus());
4370 seq_puts(m, "# -----------------\n");
4371 seq_printf(m, "# | task: %.16s-%d "
4372 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4373 data->comm, data->pid,
4374 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4375 data->policy, data->rt_priority);
4376 seq_puts(m, "# -----------------\n");
4378 if (data->critical_start) {
4379 seq_puts(m, "# => started at: ");
4380 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4381 trace_print_seq(m, &iter->seq);
4382 seq_puts(m, "\n# => ended at: ");
4383 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4384 trace_print_seq(m, &iter->seq);
4385 seq_puts(m, "\n#\n");
4391 static void test_cpu_buff_start(struct trace_iterator *iter)
4393 struct trace_seq *s = &iter->seq;
4394 struct trace_array *tr = iter->tr;
4396 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4399 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4402 if (cpumask_available(iter->started) &&
4403 cpumask_test_cpu(iter->cpu, iter->started))
4406 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4409 if (cpumask_available(iter->started))
4410 cpumask_set_cpu(iter->cpu, iter->started);
4412 /* Don't print started cpu buffer for the first entry of the trace */
4414 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4418 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4420 struct trace_array *tr = iter->tr;
4421 struct trace_seq *s = &iter->seq;
4422 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4423 struct trace_entry *entry;
4424 struct trace_event *event;
4428 test_cpu_buff_start(iter);
4430 event = ftrace_find_event(entry->type);
4432 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4433 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4434 trace_print_lat_context(iter);
4436 trace_print_context(iter);
4439 if (trace_seq_has_overflowed(s))
4440 return TRACE_TYPE_PARTIAL_LINE;
4443 if (tr->trace_flags & TRACE_ITER_FIELDS)
4444 return print_event_fields(iter, event);
4445 return event->funcs->trace(iter, sym_flags, event);
4448 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4450 return trace_handle_return(s);
4453 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4455 struct trace_array *tr = iter->tr;
4456 struct trace_seq *s = &iter->seq;
4457 struct trace_entry *entry;
4458 struct trace_event *event;
4462 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4463 trace_seq_printf(s, "%d %d %llu ",
4464 entry->pid, iter->cpu, iter->ts);
4466 if (trace_seq_has_overflowed(s))
4467 return TRACE_TYPE_PARTIAL_LINE;
4469 event = ftrace_find_event(entry->type);
4471 return event->funcs->raw(iter, 0, event);
4473 trace_seq_printf(s, "%d ?\n", entry->type);
4475 return trace_handle_return(s);
4478 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4480 struct trace_array *tr = iter->tr;
4481 struct trace_seq *s = &iter->seq;
4482 unsigned char newline = '\n';
4483 struct trace_entry *entry;
4484 struct trace_event *event;
4488 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4489 SEQ_PUT_HEX_FIELD(s, entry->pid);
4490 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4491 SEQ_PUT_HEX_FIELD(s, iter->ts);
4492 if (trace_seq_has_overflowed(s))
4493 return TRACE_TYPE_PARTIAL_LINE;
4496 event = ftrace_find_event(entry->type);
4498 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4499 if (ret != TRACE_TYPE_HANDLED)
4503 SEQ_PUT_FIELD(s, newline);
4505 return trace_handle_return(s);
4508 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4510 struct trace_array *tr = iter->tr;
4511 struct trace_seq *s = &iter->seq;
4512 struct trace_entry *entry;
4513 struct trace_event *event;
4517 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4518 SEQ_PUT_FIELD(s, entry->pid);
4519 SEQ_PUT_FIELD(s, iter->cpu);
4520 SEQ_PUT_FIELD(s, iter->ts);
4521 if (trace_seq_has_overflowed(s))
4522 return TRACE_TYPE_PARTIAL_LINE;
4525 event = ftrace_find_event(entry->type);
4526 return event ? event->funcs->binary(iter, 0, event) :
4530 int trace_empty(struct trace_iterator *iter)
4532 struct ring_buffer_iter *buf_iter;
4535 /* If we are looking at one CPU buffer, only check that one */
4536 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4537 cpu = iter->cpu_file;
4538 buf_iter = trace_buffer_iter(iter, cpu);
4540 if (!ring_buffer_iter_empty(buf_iter))
4543 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4549 for_each_tracing_cpu(cpu) {
4550 buf_iter = trace_buffer_iter(iter, cpu);
4552 if (!ring_buffer_iter_empty(buf_iter))
4555 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4563 /* Called with trace_event_read_lock() held. */
4564 enum print_line_t print_trace_line(struct trace_iterator *iter)
4566 struct trace_array *tr = iter->tr;
4567 unsigned long trace_flags = tr->trace_flags;
4568 enum print_line_t ret;
4570 if (iter->lost_events) {
4571 if (iter->lost_events == (unsigned long)-1)
4572 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4575 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4576 iter->cpu, iter->lost_events);
4577 if (trace_seq_has_overflowed(&iter->seq))
4578 return TRACE_TYPE_PARTIAL_LINE;
4581 if (iter->trace && iter->trace->print_line) {
4582 ret = iter->trace->print_line(iter);
4583 if (ret != TRACE_TYPE_UNHANDLED)
4587 if (iter->ent->type == TRACE_BPUTS &&
4588 trace_flags & TRACE_ITER_PRINTK &&
4589 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4590 return trace_print_bputs_msg_only(iter);
4592 if (iter->ent->type == TRACE_BPRINT &&
4593 trace_flags & TRACE_ITER_PRINTK &&
4594 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4595 return trace_print_bprintk_msg_only(iter);
4597 if (iter->ent->type == TRACE_PRINT &&
4598 trace_flags & TRACE_ITER_PRINTK &&
4599 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4600 return trace_print_printk_msg_only(iter);
4602 if (trace_flags & TRACE_ITER_BIN)
4603 return print_bin_fmt(iter);
4605 if (trace_flags & TRACE_ITER_HEX)
4606 return print_hex_fmt(iter);
4608 if (trace_flags & TRACE_ITER_RAW)
4609 return print_raw_fmt(iter);
4611 return print_trace_fmt(iter);
4614 void trace_latency_header(struct seq_file *m)
4616 struct trace_iterator *iter = m->private;
4617 struct trace_array *tr = iter->tr;
4619 /* print nothing if the buffers are empty */
4620 if (trace_empty(iter))
4623 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4624 print_trace_header(m, iter);
4626 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4627 print_lat_help_header(m);
4630 void trace_default_header(struct seq_file *m)
4632 struct trace_iterator *iter = m->private;
4633 struct trace_array *tr = iter->tr;
4634 unsigned long trace_flags = tr->trace_flags;
4636 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4639 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4640 /* print nothing if the buffers are empty */
4641 if (trace_empty(iter))
4643 print_trace_header(m, iter);
4644 if (!(trace_flags & TRACE_ITER_VERBOSE))
4645 print_lat_help_header(m);
4647 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4648 if (trace_flags & TRACE_ITER_IRQ_INFO)
4649 print_func_help_header_irq(iter->array_buffer,
4652 print_func_help_header(iter->array_buffer, m,
4658 static void test_ftrace_alive(struct seq_file *m)
4660 if (!ftrace_is_dead())
4662 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4663 "# MAY BE MISSING FUNCTION EVENTS\n");
4666 #ifdef CONFIG_TRACER_MAX_TRACE
4667 static void show_snapshot_main_help(struct seq_file *m)
4669 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4670 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4671 "# Takes a snapshot of the main buffer.\n"
4672 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4673 "# (Doesn't have to be '2' works with any number that\n"
4674 "# is not a '0' or '1')\n");
4677 static void show_snapshot_percpu_help(struct seq_file *m)
4679 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4680 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4681 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4682 "# Takes a snapshot of the main buffer for this cpu.\n");
4684 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4685 "# Must use main snapshot file to allocate.\n");
4687 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4688 "# (Doesn't have to be '2' works with any number that\n"
4689 "# is not a '0' or '1')\n");
4692 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4694 if (iter->tr->allocated_snapshot)
4695 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4697 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4699 seq_puts(m, "# Snapshot commands:\n");
4700 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4701 show_snapshot_main_help(m);
4703 show_snapshot_percpu_help(m);
4706 /* Should never be called */
4707 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4710 static int s_show(struct seq_file *m, void *v)
4712 struct trace_iterator *iter = v;
4715 if (iter->ent == NULL) {
4717 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4719 test_ftrace_alive(m);
4721 if (iter->snapshot && trace_empty(iter))
4722 print_snapshot_help(m, iter);
4723 else if (iter->trace && iter->trace->print_header)
4724 iter->trace->print_header(m);
4726 trace_default_header(m);
4728 } else if (iter->leftover) {
4730 * If we filled the seq_file buffer earlier, we
4731 * want to just show it now.
4733 ret = trace_print_seq(m, &iter->seq);
4735 /* ret should this time be zero, but you never know */
4736 iter->leftover = ret;
4739 ret = print_trace_line(iter);
4740 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4742 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
4744 ret = trace_print_seq(m, &iter->seq);
4746 * If we overflow the seq_file buffer, then it will
4747 * ask us for this data again at start up.
4749 * ret is 0 if seq_file write succeeded.
4752 iter->leftover = ret;
4759 * Should be used after trace_array_get(), trace_types_lock
4760 * ensures that i_cdev was already initialized.
4762 static inline int tracing_get_cpu(struct inode *inode)
4764 if (inode->i_cdev) /* See trace_create_cpu_file() */
4765 return (long)inode->i_cdev - 1;
4766 return RING_BUFFER_ALL_CPUS;
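/*
 * Illustrative sketch (not part of the original source): the +1 bias
 * assumed above lets a zero i_cdev mean "no per-cpu file", so the
 * decode maps 0 -> RING_BUFFER_ALL_CPUS and N+1 -> cpu N. The encode
 * side, as done by trace_create_cpu_file(), is sketched here.
 */
static __maybe_unused void *example_encode_cpu(long cpu)
{
	return (void *)(cpu + 1);	/* value stored in inode->i_cdev */
}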
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
4777 * Note, as iter itself can be allocated and freed in different
4778 * ways, this function is only used to free its content, and not
4779 * the iterator itself. The only requirement to all the allocations
4780 * is that it must zero all fields (kzalloc), as freeing works with
4781 either allocated content or NULL.
4783 static void free_trace_iter_content(struct trace_iterator *iter)
4785 /* The fmt is either NULL, allocated or points to static_fmt_buf */
4786 if (iter->fmt != static_fmt_buf)
4790 kfree(iter->buffer_iter);
4791 mutex_destroy(&iter->mutex);
4792 free_cpumask_var(iter->started);
4795 static struct trace_iterator *
4796 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4798 struct trace_array *tr = inode->i_private;
4799 struct trace_iterator *iter;
4802 if (tracing_disabled)
4803 return ERR_PTR(-ENODEV);
4805 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4807 return ERR_PTR(-ENOMEM);
4809 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4811 if (!iter->buffer_iter)
4815 * trace_find_next_entry() may need to save off iter->ent.
4816 * It will place it into the iter->temp buffer. As most
4817 * events are less than 128, allocate a buffer of that size.
4818 * If one is greater, then trace_find_next_entry() will
4819 * allocate a new buffer to adjust for the bigger iter->ent.
4820 * It's not critical if it fails to get allocated here.
4822 iter->temp = kmalloc(128, GFP_KERNEL);
4824 iter->temp_size = 128;
4827 * trace_event_printf() may need to modify given format
4828 * string to replace %p with %px so that it shows real address
4829 instead of a hash value. However, that is only needed for event
4830 tracing; other tracers may not need it. Defer the allocation
4831 * until it is needed.
4836 mutex_lock(&trace_types_lock);
4837 iter->trace = tr->current_trace;
4839 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4844 #ifdef CONFIG_TRACER_MAX_TRACE
4845 /* Currently only the top directory has a snapshot */
4846 if (tr->current_trace->print_max || snapshot)
4847 iter->array_buffer = &tr->max_buffer;
4850 iter->array_buffer = &tr->array_buffer;
4851 iter->snapshot = snapshot;
4853 iter->cpu_file = tracing_get_cpu(inode);
4854 mutex_init(&iter->mutex);
4856 /* Notify the tracer early; before we stop tracing. */
4857 if (iter->trace->open)
4858 iter->trace->open(iter);
4860 /* Annotate start of buffers if we had overruns */
4861 if (ring_buffer_overruns(iter->array_buffer->buffer))
4862 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4864 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4865 if (trace_clocks[tr->clock_id].in_ns)
4866 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4869 * If pause-on-trace is enabled, then stop the trace while
4870 * dumping, unless this is the "snapshot" file
4872 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4873 tracing_stop_tr(tr);
4875 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4876 for_each_tracing_cpu(cpu) {
4877 iter->buffer_iter[cpu] =
4878 ring_buffer_read_prepare(iter->array_buffer->buffer,
4881 ring_buffer_read_prepare_sync();
4882 for_each_tracing_cpu(cpu) {
4883 ring_buffer_read_start(iter->buffer_iter[cpu]);
4884 tracing_iter_reset(iter, cpu);
4887 cpu = iter->cpu_file;
4888 iter->buffer_iter[cpu] =
4889 ring_buffer_read_prepare(iter->array_buffer->buffer,
4891 ring_buffer_read_prepare_sync();
4892 ring_buffer_read_start(iter->buffer_iter[cpu]);
4893 tracing_iter_reset(iter, cpu);
4896 mutex_unlock(&trace_types_lock);
4901 mutex_unlock(&trace_types_lock);
4902 free_trace_iter_content(iter);
4904 seq_release_private(inode, file);
4905 return ERR_PTR(-ENOMEM);
4908 int tracing_open_generic(struct inode *inode, struct file *filp)
4912 ret = tracing_check_open_get_tr(NULL);
4916 filp->private_data = inode->i_private;
4920 bool tracing_is_disabled(void)
4922 return tracing_disabled ? true : false;
4926 * Open and update trace_array ref count.
4927 * Must have the current trace_array passed to it.
4929 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4931 struct trace_array *tr = inode->i_private;
4934 ret = tracing_check_open_get_tr(tr);
4938 filp->private_data = inode->i_private;
4944 * The private pointer of the inode is the trace_event_file.
4945 * Update the tr ref count associated to it.
4947 int tracing_open_file_tr(struct inode *inode, struct file *filp)
4949 struct trace_event_file *file = inode->i_private;
4952 ret = tracing_check_open_get_tr(file->tr);
4956 mutex_lock(&event_mutex);
4958 /* Fail if the file is marked for removal */
4959 if (file->flags & EVENT_FILE_FL_FREED) {
4960 trace_array_put(file->tr);
4963 event_file_get(file);
4966 mutex_unlock(&event_mutex);
4970 filp->private_data = inode->i_private;
4975 int tracing_release_file_tr(struct inode *inode, struct file *filp)
4977 struct trace_event_file *file = inode->i_private;
4979 trace_array_put(file->tr);
4980 event_file_put(file);
4985 int tracing_single_release_file_tr(struct inode *inode, struct file *filp)
4987 tracing_release_file_tr(inode, filp);
4988 return single_release(inode, filp);
4991 static int tracing_mark_open(struct inode *inode, struct file *filp)
4993 stream_open(inode, filp);
4994 return tracing_open_generic_tr(inode, filp);
4997 static int tracing_release(struct inode *inode, struct file *file)
4999 struct trace_array *tr = inode->i_private;
5000 struct seq_file *m = file->private_data;
5001 struct trace_iterator *iter;
5004 if (!(file->f_mode & FMODE_READ)) {
5005 trace_array_put(tr);
5009 /* Writes do not use seq_file */
5011 mutex_lock(&trace_types_lock);
5013 for_each_tracing_cpu(cpu) {
5014 if (iter->buffer_iter[cpu])
5015 ring_buffer_read_finish(iter->buffer_iter[cpu]);
5018 if (iter->trace && iter->trace->close)
5019 iter->trace->close(iter);
5021 if (!iter->snapshot && tr->stop_count)
5022 /* reenable tracing if it was previously enabled */
5023 tracing_start_tr(tr);
5025 __trace_array_put(tr);
5027 mutex_unlock(&trace_types_lock);
5029 free_trace_iter_content(iter);
5030 seq_release_private(inode, file);
5035 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
5037 struct trace_array *tr = inode->i_private;
5039 trace_array_put(tr);
5043 static int tracing_single_release_tr(struct inode *inode, struct file *file)
5045 struct trace_array *tr = inode->i_private;
5047 trace_array_put(tr);
5049 return single_release(inode, file);
5052 static int tracing_open(struct inode *inode, struct file *file)
5054 struct trace_array *tr = inode->i_private;
5055 struct trace_iterator *iter;
5058 ret = tracing_check_open_get_tr(tr);
5062 /* If this file was open for write, then erase contents */
5063 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
5064 int cpu = tracing_get_cpu(inode);
5065 struct array_buffer *trace_buf = &tr->array_buffer;
5067 #ifdef CONFIG_TRACER_MAX_TRACE
5068 if (tr->current_trace->print_max)
5069 trace_buf = &tr->max_buffer;
5072 if (cpu == RING_BUFFER_ALL_CPUS)
5073 tracing_reset_online_cpus(trace_buf);
5075 tracing_reset_cpu(trace_buf, cpu);
5078 if (file->f_mode & FMODE_READ) {
5079 iter = __tracing_open(inode, file, false);
5081 ret = PTR_ERR(iter);
5082 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5083 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5087 trace_array_put(tr);
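/*
 * Illustrative userspace sketch (not part of this file): the O_TRUNC
 * path above is what "echo > trace" exercises from a shell. Assumes
 * tracefs is mounted at /sys/kernel/tracing and needs <fcntl.h> and
 * <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);
 *
 *	if (fd >= 0)
 *		close(fd);	// the open itself reset the buffer
 */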
5093 * Some tracers are not suitable for instance buffers.
5094 * A tracer is always available for the global array (toplevel)
5095 * or if it explicitly states that it is.
5098 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5100 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
5103 /* Find the next tracer that this trace array may use */
5104 static struct tracer *
5105 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5107 while (t && !trace_ok_for_array(t, tr))
5114 t_next(struct seq_file *m, void *v, loff_t *pos)
5116 struct trace_array *tr = m->private;
5117 struct tracer *t = v;
5122 t = get_tracer_for_array(tr, t->next);
5127 static void *t_start(struct seq_file *m, loff_t *pos)
5129 struct trace_array *tr = m->private;
5133 mutex_lock(&trace_types_lock);
5135 t = get_tracer_for_array(tr, trace_types);
5136 for (; t && l < *pos; t = t_next(m, t, &l))
5142 static void t_stop(struct seq_file *m, void *p)
5144 mutex_unlock(&trace_types_lock);
5147 static int t_show(struct seq_file *m, void *v)
5149 struct tracer *t = v;
5154 seq_puts(m, t->name);
5163 static const struct seq_operations show_traces_seq_ops = {
5170 static int show_traces_open(struct inode *inode, struct file *file)
5172 struct trace_array *tr = inode->i_private;
5176 ret = tracing_check_open_get_tr(tr);
5180 ret = seq_open(file, &show_traces_seq_ops);
5182 trace_array_put(tr);
5186 m = file->private_data;
5192 static int show_traces_release(struct inode *inode, struct file *file)
5194 struct trace_array *tr = inode->i_private;
5196 trace_array_put(tr);
5197 return seq_release(inode, file);
5201 tracing_write_stub(struct file *filp, const char __user *ubuf,
5202 size_t count, loff_t *ppos)
5207 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5211 if (file->f_mode & FMODE_READ)
5212 ret = seq_lseek(file, offset, whence);
5214 file->f_pos = ret = 0;
5219 static const struct file_operations tracing_fops = {
5220 .open = tracing_open,
5222 .read_iter = seq_read_iter,
5223 .splice_read = copy_splice_read,
5224 .write = tracing_write_stub,
5225 .llseek = tracing_lseek,
5226 .release = tracing_release,
5229 static const struct file_operations show_traces_fops = {
5230 .open = show_traces_open,
5232 .llseek = seq_lseek,
5233 .release = show_traces_release,
5237 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5238 size_t count, loff_t *ppos)
5240 struct trace_array *tr = file_inode(filp)->i_private;
5244 len = snprintf(NULL, 0, "%*pb\n",
5245 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5246 mask_str = kmalloc(len, GFP_KERNEL);
5250 len = snprintf(mask_str, len, "%*pb\n",
5251 cpumask_pr_args(tr->tracing_cpumask));
5256 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
5264 int tracing_set_cpumask(struct trace_array *tr,
5265 cpumask_var_t tracing_cpumask_new)
5272 local_irq_disable();
5273 arch_spin_lock(&tr->max_lock);
5274 for_each_tracing_cpu(cpu) {
5276 * Increase/decrease the disabled counter if we are
5277 * about to flip a bit in the cpumask:
5279 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5280 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5281 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5282 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5283 #ifdef CONFIG_TRACER_MAX_TRACE
5284 ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu);
5287 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5288 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5289 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5290 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5291 #ifdef CONFIG_TRACER_MAX_TRACE
5292 ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu);
5296 arch_spin_unlock(&tr->max_lock);
5299 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
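/*
 * Illustrative userspace sketch (not part of this file): restricting
 * tracing to CPUs 0 and 1 by writing the hex mask "3" to
 * tracing_cpumask (bit N selects CPU N). Needs <fcntl.h> and
 * <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "3", 1);
 *		close(fd);
 *	}
 */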
5305 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5306 size_t count, loff_t *ppos)
5308 struct trace_array *tr = file_inode(filp)->i_private;
5309 cpumask_var_t tracing_cpumask_new;
5312 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5315 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5319 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5323 free_cpumask_var(tracing_cpumask_new);
5328 free_cpumask_var(tracing_cpumask_new);
5333 static const struct file_operations tracing_cpumask_fops = {
5334 .open = tracing_open_generic_tr,
5335 .read = tracing_cpumask_read,
5336 .write = tracing_cpumask_write,
5337 .release = tracing_release_generic_tr,
5338 .llseek = generic_file_llseek,
5341 static int tracing_trace_options_show(struct seq_file *m, void *v)
5343 struct tracer_opt *trace_opts;
5344 struct trace_array *tr = m->private;
5348 mutex_lock(&trace_types_lock);
5349 tracer_flags = tr->current_trace->flags->val;
5350 trace_opts = tr->current_trace->flags->opts;
5352 for (i = 0; trace_options[i]; i++) {
5353 if (tr->trace_flags & (1 << i))
5354 seq_printf(m, "%s\n", trace_options[i]);
5356 seq_printf(m, "no%s\n", trace_options[i]);
5359 for (i = 0; trace_opts[i].name; i++) {
5360 if (tracer_flags & trace_opts[i].bit)
5361 seq_printf(m, "%s\n", trace_opts[i].name);
5363 seq_printf(m, "no%s\n", trace_opts[i].name);
5365 mutex_unlock(&trace_types_lock);
5370 static int __set_tracer_option(struct trace_array *tr,
5371 struct tracer_flags *tracer_flags,
5372 struct tracer_opt *opts, int neg)
5374 struct tracer *trace = tracer_flags->trace;
5377 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5382 tracer_flags->val &= ~opts->bit;
5384 tracer_flags->val |= opts->bit;
5388 /* Try to assign a tracer specific option */
5389 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5391 struct tracer *trace = tr->current_trace;
5392 struct tracer_flags *tracer_flags = trace->flags;
5393 struct tracer_opt *opts = NULL;
5396 for (i = 0; tracer_flags->opts[i].name; i++) {
5397 opts = &tracer_flags->opts[i];
5399 if (strcmp(cmp, opts->name) == 0)
5400 return __set_tracer_option(tr, trace->flags, opts, neg);
5406 /* Some tracers require overwrite to stay enabled */
5407 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5409 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5415 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5419 if ((mask == TRACE_ITER_RECORD_TGID) ||
5420 (mask == TRACE_ITER_RECORD_CMD))
5421 lockdep_assert_held(&event_mutex);
5423 /* do nothing if flag is already set */
5424 if (!!(tr->trace_flags & mask) == !!enabled)
5427 /* Give the tracer a chance to approve the change */
5428 if (tr->current_trace->flag_changed)
5429 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5433 tr->trace_flags |= mask;
5435 tr->trace_flags &= ~mask;
5437 if (mask == TRACE_ITER_RECORD_CMD)
5438 trace_event_enable_cmd_record(enabled);
5440 if (mask == TRACE_ITER_RECORD_TGID) {
5442 tgid_map_max = pid_max;
5443 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5447 * Pairs with smp_load_acquire() in
5448 * trace_find_tgid_ptr() to ensure that if it observes
5449 * the tgid_map we just allocated then it also observes
5450 * the corresponding tgid_map_max value.
5452 smp_store_release(&tgid_map, map);
5455 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5459 trace_event_enable_tgid_record(enabled);
5462 if (mask == TRACE_ITER_EVENT_FORK)
5463 trace_event_follow_fork(tr, enabled);
5465 if (mask == TRACE_ITER_FUNC_FORK)
5466 ftrace_pid_follow_fork(tr, enabled);
5468 if (mask == TRACE_ITER_OVERWRITE) {
5469 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5470 #ifdef CONFIG_TRACER_MAX_TRACE
5471 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5475 if (mask == TRACE_ITER_PRINTK) {
5476 trace_printk_start_stop_comm(enabled);
5477 trace_printk_control(enabled);
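/*
 * A minimal sketch of the matching acquire side in
 * trace_find_tgid_ptr(): the map pointer is loaded with acquire
 * semantics, so a reader that observes a non-NULL tgid_map also
 * observes the corresponding tgid_map_max:
 *
 *	int *map = smp_load_acquire(&tgid_map);
 *
 *	if (unlikely(!map || pid > tgid_map_max))
 *		return NULL;
 *	return &map[pid];
 */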
5483 int trace_set_options(struct trace_array *tr, char *option)
5488 size_t orig_len = strlen(option);
5491 cmp = strstrip(option);
5493 len = str_has_prefix(cmp, "no");
5499 mutex_lock(&event_mutex);
5500 mutex_lock(&trace_types_lock);
5502 ret = match_string(trace_options, -1, cmp);
5503 /* If no option could be set, test the specific tracer options */
5505 ret = set_tracer_option(tr, cmp, neg);
5507 ret = set_tracer_flag(tr, 1 << ret, !neg);
5509 mutex_unlock(&trace_types_lock);
5510 mutex_unlock(&event_mutex);
5513 * If the first trailing whitespace is replaced with '\0' by strstrip,
5514 * turn it back into a space.
5516 if (orig_len > strlen(option))
5517 option[strlen(option)] = ' ';
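/*
 * Illustrative userspace sketch (not part of this file): clearing a
 * flag through the trace_options file using the "no" prefix parsed
 * above ("print-parent" is one of the generic options). Needs
 * <fcntl.h> and <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "noprint-parent", 14);
 *		close(fd);
 *	}
 */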
5522 static void __init apply_trace_boot_options(void)
5524 char *buf = trace_boot_options_buf;
5528 option = strsep(&buf, ",");
5534 trace_set_options(&global_trace, option);
5536 /* Put back the comma to allow this to be called again */
5543 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5544 size_t cnt, loff_t *ppos)
5546 struct seq_file *m = filp->private_data;
5547 struct trace_array *tr = m->private;
5551 if (cnt >= sizeof(buf))
5554 if (copy_from_user(buf, ubuf, cnt))
5559 ret = trace_set_options(tr, buf);
5568 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5570 struct trace_array *tr = inode->i_private;
5573 ret = tracing_check_open_get_tr(tr);
5577 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5579 trace_array_put(tr);
5584 static const struct file_operations tracing_iter_fops = {
5585 .open = tracing_trace_options_open,
5587 .llseek = seq_lseek,
5588 .release = tracing_single_release_tr,
5589 .write = tracing_trace_options_write,
5592 static const char readme_msg[] =
5593 "tracing mini-HOWTO:\n\n"
5594 "# echo 0 > tracing_on : quick way to disable tracing\n"
5595 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5596 " Important files:\n"
5597 " trace\t\t\t- The static contents of the buffer\n"
5598 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5599 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5600 " current_tracer\t- function and latency tracers\n"
5601 " available_tracers\t- list of configured tracers for current_tracer\n"
5602 " error_log\t- error log for failed commands (that support it)\n"
5603 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5604 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5605 " trace_clock\t\t- change the clock used to order events\n"
5606 " local: Per cpu clock but may not be synced across CPUs\n"
5607 " global: Synced across CPUs but slows tracing down.\n"
5608 " counter: Not a clock, but just an increment\n"
5609 " uptime: Jiffy counter from time of boot\n"
5610 " perf: Same clock that perf events use\n"
5611 #ifdef CONFIG_X86_64
5612 " x86-tsc: TSC cycle counter\n"
5614 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5615 " delta: Delta difference against a buffer-wide timestamp\n"
5616 " absolute: Absolute (standalone) timestamp\n"
5617 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5618 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5619 " tracing_cpumask\t- Limit which CPUs to trace\n"
5620 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5621 "\t\t\t Remove sub-buffer with rmdir\n"
5622 " trace_options\t\t- Set format or modify how tracing happens\n"
5623 "\t\t\t Disable an option by prefixing 'no' to the\n"
5624 "\t\t\t option name\n"
5625 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5626 #ifdef CONFIG_DYNAMIC_FTRACE
5627 "\n available_filter_functions - list of functions that can be filtered on\n"
5628 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5629 "\t\t\t functions\n"
5630 "\t accepts: func_full_name or glob-matching-pattern\n"
5631 "\t modules: Can select a group via module\n"
5632 "\t Format: :mod:<module-name>\n"
5633 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5634 "\t triggers: a command to perform when function is hit\n"
5635 "\t Format: <function>:<trigger>[:count]\n"
5636 "\t trigger: traceon, traceoff\n"
5637 "\t\t enable_event:<system>:<event>\n"
5638 "\t\t disable_event:<system>:<event>\n"
5639 #ifdef CONFIG_STACKTRACE
5642 #ifdef CONFIG_TRACER_SNAPSHOT
5647 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5648 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5649 "\t The first one will disable tracing every time do_fault is hit\n"
5650 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5651 "\t The first time do trap is hit and it disables tracing, the\n"
5652 "\t counter will decrement to 2. If tracing is already disabled,\n"
5653 "\t the counter will not decrement. It only decrements when the\n"
5654 "\t trigger did work\n"
5655 "\t To remove trigger without count:\n"
5656 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5657 "\t To remove trigger with a count:\n"
5658 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5659 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5660 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5661 "\t modules: Can select a group via module command :mod:\n"
5662 "\t Does not accept triggers\n"
5663 #endif /* CONFIG_DYNAMIC_FTRACE */
5664 #ifdef CONFIG_FUNCTION_TRACER
5665 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5667 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5670 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5671 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5672 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5673 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5675 #ifdef CONFIG_TRACER_SNAPSHOT
5676 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5677 "\t\t\t snapshot buffer. Read the contents for more\n"
5678 "\t\t\t information\n"
5680 #ifdef CONFIG_STACK_TRACER
5681 " stack_trace\t\t- Shows the max stack trace when active\n"
5682 " stack_max_size\t- Shows current max stack size that was traced\n"
5683 "\t\t\t Write into this file to reset the max size (trigger a\n"
5684 "\t\t\t new trace)\n"
5685 #ifdef CONFIG_DYNAMIC_FTRACE
5686 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5689 #endif /* CONFIG_STACK_TRACER */
5690 #ifdef CONFIG_DYNAMIC_EVENTS
5691 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5692 "\t\t\t Write into this file to define/undefine new trace events.\n"
5694 #ifdef CONFIG_KPROBE_EVENTS
5695 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5696 "\t\t\t Write into this file to define/undefine new trace events.\n"
5698 #ifdef CONFIG_UPROBE_EVENTS
5699 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5700 "\t\t\t Write into this file to define/undefine new trace events.\n"
5702 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \
5703 defined(CONFIG_FPROBE_EVENTS)
5704 "\t accepts: event-definitions (one definition per line)\n"
5705 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5706 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5707 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5709 #ifdef CONFIG_FPROBE_EVENTS
5710 "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n"
5711 "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n"
5713 #ifdef CONFIG_HIST_TRIGGERS
5714 "\t s:[synthetic/]<event> <field> [<field>]\n"
5716 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n"
5717 "\t -:[<group>/][<event>]\n"
5718 #ifdef CONFIG_KPROBE_EVENTS
5719 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5720 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5722 #ifdef CONFIG_UPROBE_EVENTS
5723 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5725 "\t args: <name>=fetcharg[:type]\n"
5726 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5727 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5728 #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
5729 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5730 "\t <argname>[->field[->field|.field...]],\n"
5732 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5735 "\t $stack<index>, $stack, $retval, $comm,\n"
5737 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5738 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
5739 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5740 "\t symstr, <type>\\[<array-size>\\]\n"
5741 #ifdef CONFIG_HIST_TRIGGERS
5742 "\t field: <stype> <name>;\n"
5743 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5744 "\t [unsigned] char/int/long\n"
5746 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5747 "\t of the <attached-group>/<attached-event>.\n"
5749 " events/\t\t- Directory containing all trace event subsystems:\n"
5750 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5751 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5752 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5754 " filter\t\t- If set, only events passing filter are traced\n"
5755 " events/<system>/<event>/\t- Directory containing control files for\n"
5757 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5758 " filter\t\t- If set, only events passing filter are traced\n"
5759 " trigger\t\t- If set, a command to perform when event is hit\n"
5760 "\t Format: <trigger>[:count][if <filter>]\n"
5761 "\t trigger: traceon, traceoff\n"
5762 "\t enable_event:<system>:<event>\n"
5763 "\t disable_event:<system>:<event>\n"
5764 #ifdef CONFIG_HIST_TRIGGERS
5765 "\t enable_hist:<system>:<event>\n"
5766 "\t disable_hist:<system>:<event>\n"
5768 #ifdef CONFIG_STACKTRACE
5771 #ifdef CONFIG_TRACER_SNAPSHOT
5774 #ifdef CONFIG_HIST_TRIGGERS
5775 "\t\t hist (see below)\n"
5777 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5778 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5779 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5780 "\t events/block/block_unplug/trigger\n"
5781 "\t The first disables tracing every time block_unplug is hit.\n"
5782 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5783 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5784 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5785 "\t Like function triggers, the counter is only decremented if it\n"
5786 "\t enabled or disabled tracing.\n"
5787 "\t To remove a trigger without a count:\n"
5788 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5789 "\t To remove a trigger with a count:\n"
5790 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5791 "\t Filters can be ignored when removing a trigger.\n"
5792 #ifdef CONFIG_HIST_TRIGGERS
5793 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5794 "\t Format: hist:keys=<field1[,field2,...]>\n"
5795 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5796 "\t [:values=<field1[,field2,...]>]\n"
5797 "\t [:sort=<field1[,field2,...]>]\n"
5798 "\t [:size=#entries]\n"
5799 "\t [:pause][:continue][:clear]\n"
5800 "\t [:name=histname1]\n"
5801 "\t [:nohitcount]\n"
5802 "\t [:<handler>.<action>]\n"
5803 "\t [if <filter>]\n\n"
5804 "\t Note, special fields can be used as well:\n"
5805 "\t common_timestamp - to record current timestamp\n"
5806 "\t common_cpu - to record the CPU the event happened on\n"
5808 "\t A hist trigger variable can be:\n"
5809 "\t - a reference to a field e.g. x=current_timestamp,\n"
5810 "\t - a reference to another variable e.g. y=$x,\n"
5811 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5812 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5814 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5815 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5816 "\t variable reference, field or numeric literal.\n"
5818 "\t When a matching event is hit, an entry is added to a hash\n"
5819 "\t table using the key(s) and value(s) named, and the value of a\n"
5820 "\t sum called 'hitcount' is incremented. Keys and values\n"
5821 "\t correspond to fields in the event's format description. Keys\n"
5822 "\t can be any field, or the special string 'common_stacktrace'.\n"
5823 "\t Compound keys consisting of up to two fields can be specified\n"
5824 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5825 "\t fields. Sort keys consisting of up to two fields can be\n"
5826 "\t specified using the 'sort' keyword. The sort direction can\n"
5827 "\t be modified by appending '.descending' or '.ascending' to a\n"
5828 "\t sort field. The 'size' parameter can be used to specify more\n"
5829 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5830 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5831 "\t its histogram data will be shared with other triggers of the\n"
5832 "\t same name, and trigger hits will update this common data.\n\n"
5833 "\t Reading the 'hist' file for the event will dump the hash\n"
5834 "\t table in its entirety to stdout. If there are multiple hist\n"
5835 "\t triggers attached to an event, there will be a table for each\n"
5836 "\t trigger in the output. The table displayed for a named\n"
5837 "\t trigger will be the same as any other instance having the\n"
5838 "\t same name. The default format used to display a given field\n"
5839 "\t can be modified by appending any of the following modifiers\n"
5840 "\t to the field name, as applicable:\n\n"
5841 "\t .hex display a number as a hex value\n"
5842 "\t .sym display an address as a symbol\n"
5843 "\t .sym-offset display an address as a symbol and offset\n"
5844 "\t .execname display a common_pid as a program name\n"
5845 "\t .syscall display a syscall id as a syscall name\n"
5846 "\t .log2 display log2 value rather than raw number\n"
5847 "\t .buckets=size display values in groups of size rather than raw number\n"
5848 "\t .usecs display a common_timestamp in microseconds\n"
5849 "\t .percent display a number of percentage value\n"
5850 "\t .graph display a bar-graph of a value\n\n"
5851 "\t The 'pause' parameter can be used to pause an existing hist\n"
5852 "\t trigger or to start a hist trigger but not log any events\n"
5853 "\t until told to do so. 'continue' can be used to start or\n"
5854 "\t restart a paused hist trigger.\n\n"
5855 "\t The 'clear' parameter will clear the contents of a running\n"
5856 "\t hist trigger and leave its current paused/active state\n"
5858 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5859 "\t raw hitcount in the histogram.\n\n"
5860 "\t The enable_hist and disable_hist triggers can be used to\n"
5861 "\t have one event conditionally start and stop another event's\n"
5862 "\t already-attached hist trigger. The syntax is analogous to\n"
5863 "\t the enable_event and disable_event triggers.\n\n"
5864 "\t Hist trigger handlers and actions are executed whenever a\n"
5865 "\t a histogram entry is added or updated. They take the form:\n\n"
5866 "\t <handler>.<action>\n\n"
5867 "\t The available handlers are:\n\n"
5868 "\t onmatch(matching.event) - invoke on addition or update\n"
5869 "\t onmax(var) - invoke if var exceeds current max\n"
5870 "\t onchange(var) - invoke action if var changes\n\n"
5871 "\t The available actions are:\n\n"
5872 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5873 "\t save(field,...) - save current event fields\n"
5874 #ifdef CONFIG_TRACER_SNAPSHOT
5875 "\t snapshot() - snapshot the trace buffer\n\n"
5877 #ifdef CONFIG_SYNTH_EVENTS
5878 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5879 "\t Write into this file to define/undefine new synthetic events.\n"
5880 "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n"
5886 tracing_readme_read(struct file *filp, char __user *ubuf,
5887 size_t cnt, loff_t *ppos)
5889 return simple_read_from_buffer(ubuf, cnt, ppos,
5890 readme_msg, strlen(readme_msg));
5893 static const struct file_operations tracing_readme_fops = {
5894 .open = tracing_open_generic,
5895 .read = tracing_readme_read,
5896 .llseek = generic_file_llseek,
5899 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5903 return trace_find_tgid_ptr(pid);
5906 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5910 return trace_find_tgid_ptr(pid);
5913 static void saved_tgids_stop(struct seq_file *m, void *v)
5917 static int saved_tgids_show(struct seq_file *m, void *v)
5919 int *entry = (int *)v;
5920 int pid = entry - tgid_map;
5926 seq_printf(m, "%d %d\n", pid, tgid);
5930 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5931 .start = saved_tgids_start,
5932 .stop = saved_tgids_stop,
5933 .next = saved_tgids_next,
5934 .show = saved_tgids_show,
5937 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5941 ret = tracing_check_open_get_tr(NULL);
5945 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5949 static const struct file_operations tracing_saved_tgids_fops = {
5950 .open = tracing_saved_tgids_open,
5952 .llseek = seq_lseek,
5953 .release = seq_release,
5956 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5958 unsigned int *ptr = v;
5960 if (*pos || m->count)
5965 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5967 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5976 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5982 arch_spin_lock(&trace_cmdline_lock);
5984 v = &savedcmd->map_cmdline_to_pid[0];
5986 v = saved_cmdlines_next(m, v, &l);
5994 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5996 arch_spin_unlock(&trace_cmdline_lock);
6000 static int saved_cmdlines_show(struct seq_file *m, void *v)
6002 char buf[TASK_COMM_LEN];
6003 unsigned int *pid = v;
6005 __trace_find_cmdline(*pid, buf);
6006 seq_printf(m, "%d %s\n", *pid, buf);
6010 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
6011 .start = saved_cmdlines_start,
6012 .next = saved_cmdlines_next,
6013 .stop = saved_cmdlines_stop,
6014 .show = saved_cmdlines_show,
6017 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
6021 ret = tracing_check_open_get_tr(NULL);
6025 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
6028 static const struct file_operations tracing_saved_cmdlines_fops = {
6029 .open = tracing_saved_cmdlines_open,
6031 .llseek = seq_lseek,
6032 .release = seq_release,
6036 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
6037 size_t cnt, loff_t *ppos)
6043 arch_spin_lock(&trace_cmdline_lock);
6044 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
6045 arch_spin_unlock(&trace_cmdline_lock);
6048 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6051 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
6053 kfree(s->saved_cmdlines);
6054 kfree(s->map_cmdline_to_pid);
6058 static int tracing_resize_saved_cmdlines(unsigned int val)
6060 struct saved_cmdlines_buffer *s, *savedcmd_temp;
6062 s = kmalloc(sizeof(*s), GFP_KERNEL);
6066 if (allocate_cmdlines_buffer(val, s) < 0) {
6072 arch_spin_lock(&trace_cmdline_lock);
6073 savedcmd_temp = savedcmd;
6075 arch_spin_unlock(&trace_cmdline_lock);
6077 free_saved_cmdlines_buffer(savedcmd_temp);
6083 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
6084 size_t cnt, loff_t *ppos)
6089 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6093 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
6094 if (!val || val > PID_MAX_DEFAULT)
6097 ret = tracing_resize_saved_cmdlines((unsigned int)val);
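/*
 * Illustrative userspace sketch (not part of this file): growing the
 * saved comm cache so that more pids resolve to task names. The value
 * must be between 1 and PID_MAX_DEFAULT, as checked above. Needs
 * <fcntl.h> and <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1024", 4);
 *		close(fd);
 *	}
 */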
6106 static const struct file_operations tracing_saved_cmdlines_size_fops = {
6107 .open = tracing_open_generic,
6108 .read = tracing_saved_cmdlines_size_read,
6109 .write = tracing_saved_cmdlines_size_write,
6112 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
6113 static union trace_eval_map_item *
6114 update_eval_map(union trace_eval_map_item *ptr)
6116 if (!ptr->map.eval_string) {
6117 if (ptr->tail.next) {
6118 ptr = ptr->tail.next;
6119 /* Set ptr to the next real item (skip head) */
6127 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6129 union trace_eval_map_item *ptr = v;
6132 * Paranoid! If ptr points to end, we don't want to increment past it.
6133 * This really should never happen.
6136 ptr = update_eval_map(ptr);
6137 if (WARN_ON_ONCE(!ptr))
6141 ptr = update_eval_map(ptr);
6146 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6148 union trace_eval_map_item *v;
6151 mutex_lock(&trace_eval_mutex);
6153 v = trace_eval_maps;
6157 while (v && l < *pos) {
6158 v = eval_map_next(m, v, &l);
6164 static void eval_map_stop(struct seq_file *m, void *v)
6166 mutex_unlock(&trace_eval_mutex);
6169 static int eval_map_show(struct seq_file *m, void *v)
6171 union trace_eval_map_item *ptr = v;
6173 seq_printf(m, "%s %ld (%s)\n",
6174 ptr->map.eval_string, ptr->map.eval_value,
6180 static const struct seq_operations tracing_eval_map_seq_ops = {
6181 .start = eval_map_start,
6182 .next = eval_map_next,
6183 .stop = eval_map_stop,
6184 .show = eval_map_show,
6187 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6191 ret = tracing_check_open_get_tr(NULL);
6195 return seq_open(filp, &tracing_eval_map_seq_ops);
6198 static const struct file_operations tracing_eval_map_fops = {
6199 .open = tracing_eval_map_open,
6201 .llseek = seq_lseek,
6202 .release = seq_release,
6205 static inline union trace_eval_map_item *
6206 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6208 /* Return tail of array given the head */
6209 return ptr + ptr->head.length + 1;
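/*
 * Layout of one chunk as allocated in trace_insert_eval_map_file()
 * below: len map entries bracketed by a head and a tail item:
 *
 *	[ head | map[0] | map[1] | ... | map[len - 1] | tail ]
 *
 * head.length holds len, so the tail sits at ptr + len + 1, which is
 * exactly what the helper above computes.
 */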
6213 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6216 struct trace_eval_map **stop;
6217 struct trace_eval_map **map;
6218 union trace_eval_map_item *map_array;
6219 union trace_eval_map_item *ptr;
6224 * The trace_eval_maps contains the map plus a head and tail item,
6225 * where the head holds the module and length of array, and the
6226 * tail holds a pointer to the next list.
6228 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6230 pr_warn("Unable to allocate trace eval mapping\n");
6234 mutex_lock(&trace_eval_mutex);
6236 if (!trace_eval_maps)
6237 trace_eval_maps = map_array;
6239 ptr = trace_eval_maps;
6241 ptr = trace_eval_jmp_to_tail(ptr);
6242 if (!ptr->tail.next)
6244 ptr = ptr->tail.next;
6247 ptr->tail.next = map_array;
6249 map_array->head.mod = mod;
6250 map_array->head.length = len;
6253 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6254 map_array->map = **map;
6257 memset(map_array, 0, sizeof(*map_array));
6259 mutex_unlock(&trace_eval_mutex);
6262 static void trace_create_eval_file(struct dentry *d_tracer)
6264 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6265 NULL, &tracing_eval_map_fops);
6268 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6269 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6270 static inline void trace_insert_eval_map_file(struct module *mod,
6271 struct trace_eval_map **start, int len) { }
6272 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6274 static void trace_insert_eval_map(struct module *mod,
6275 struct trace_eval_map **start, int len)
6277 struct trace_eval_map **map;
6284 trace_event_eval_update(map, len);
6286 trace_insert_eval_map_file(mod, start, len);
6290 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6291 size_t cnt, loff_t *ppos)
6293 struct trace_array *tr = filp->private_data;
6294 char buf[MAX_TRACER_SIZE+2];
6297 mutex_lock(&trace_types_lock);
6298 r = sprintf(buf, "%s\n", tr->current_trace->name);
6299 mutex_unlock(&trace_types_lock);
6301 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6304 int tracer_init(struct tracer *t, struct trace_array *tr)
6306 tracing_reset_online_cpus(&tr->array_buffer);
6310 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6314 for_each_tracing_cpu(cpu)
6315 per_cpu_ptr(buf->data, cpu)->entries = val;
6318 static void update_buffer_entries(struct array_buffer *buf, int cpu)
6320 if (cpu == RING_BUFFER_ALL_CPUS) {
6321 set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
6323 per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
6327 #ifdef CONFIG_TRACER_MAX_TRACE
6328 /* resize @tr's buffer to the size of @size_tr's entries */
6329 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6330 struct array_buffer *size_buf, int cpu_id)
6334 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6335 for_each_tracing_cpu(cpu) {
6336 ret = ring_buffer_resize(trace_buf->buffer,
6337 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6340 per_cpu_ptr(trace_buf->data, cpu)->entries =
6341 per_cpu_ptr(size_buf->data, cpu)->entries;
6344 ret = ring_buffer_resize(trace_buf->buffer,
6345 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6347 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6348 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6353 #endif /* CONFIG_TRACER_MAX_TRACE */
6355 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6356 unsigned long size, int cpu)
6361 * If the kernel or the user changes the size of the ring buffer,
6362 * we use the size that was given, and we can forget about
6363 * expanding it later.
6365 trace_set_ring_buffer_expanded(tr);
6367 /* May be called before buffers are initialized */
6368 if (!tr->array_buffer.buffer)
6371 /* Do not allow tracing while resizing ring buffer */
6372 tracing_stop_tr(tr);
6374 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6378 #ifdef CONFIG_TRACER_MAX_TRACE
6379 if (!tr->allocated_snapshot)
6382 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6384 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6385 &tr->array_buffer, cpu);
6388 * AARGH! We are left with different
6389 * size max buffer!!!!
6390 * The max buffer is our "snapshot" buffer.
6391 * When a tracer needs a snapshot (one of the
6392 * latency tracers), it swaps the max buffer
6393 * with the saved snapshot. We succeeded in
6394 * updating the size of the main buffer, but failed to
6395 * update the size of the max buffer. But when we tried
6396 * to reset the main buffer to the original size, we
6397 * failed there too. This is very unlikely to
6398 * happen, but if it does, warn and kill all
6402 tracing_disabled = 1;
6407 update_buffer_entries(&tr->max_buffer, cpu);
6410 #endif /* CONFIG_TRACER_MAX_TRACE */
6412 update_buffer_entries(&tr->array_buffer, cpu);
6414 tracing_start_tr(tr);
6418 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6419 unsigned long size, int cpu_id)
6423 mutex_lock(&trace_types_lock);
6425 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6426 /* make sure, this cpu is enabled in the mask */
6427 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6433 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6438 mutex_unlock(&trace_types_lock);
6445 * tracing_update_buffers - used by tracing facility to expand ring buffers
6446 * @tr: The tracing instance
6448 * To save memory when tracing is never used on a system that has it
6449 * configured in, the ring buffers are set to a minimum size. Once
6450 * a user starts to use the tracing facility, they are expanded
6451 * to their default size.
6453 * This function is to be called when a tracer is about to be used.
6455 int tracing_update_buffers(struct trace_array *tr)
6459 mutex_lock(&trace_types_lock);
6460 if (!tr->ring_buffer_expanded)
6461 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6462 RING_BUFFER_ALL_CPUS);
6463 mutex_unlock(&trace_types_lock);
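/*
 * Illustrative userspace sketch (not part of this file): expanding the
 * per-cpu buffers explicitly by writing a size in KB to buffer_size_kb,
 * which ends up in tracing_resize_ring_buffer(). Needs <fcntl.h> and
 * <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "4096", 4);	// 4 MB per cpu
 *		close(fd);
 *	}
 */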
6468 struct trace_option_dentry;
6471 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6474 * Used to clear out the tracer before deletion of an instance.
6475 * Must have trace_types_lock held.
6477 static void tracing_set_nop(struct trace_array *tr)
6479 if (tr->current_trace == &nop_trace)
6482 tr->current_trace->enabled--;
6484 if (tr->current_trace->reset)
6485 tr->current_trace->reset(tr);
6487 tr->current_trace = &nop_trace;
6490 static bool tracer_options_updated;
6492 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6494 /* Only enable if the directory has been created already. */
6498 /* Only create trace option files after update_tracer_options finish */
6499 if (!tracer_options_updated)
6502 create_trace_option_files(tr, t);
6505 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6508 #ifdef CONFIG_TRACER_MAX_TRACE
6513 mutex_lock(&trace_types_lock);
6515 if (!tr->ring_buffer_expanded) {
6516 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6517 RING_BUFFER_ALL_CPUS);
6523 for (t = trace_types; t; t = t->next) {
6524 if (strcmp(t->name, buf) == 0)
6531 if (t == tr->current_trace)
6534 #ifdef CONFIG_TRACER_SNAPSHOT
6535 if (t->use_max_tr) {
6536 local_irq_disable();
6537 arch_spin_lock(&tr->max_lock);
6538 if (tr->cond_snapshot)
6540 arch_spin_unlock(&tr->max_lock);
6546 /* Some tracers won't work on kernel command line */
6547 if (system_state < SYSTEM_RUNNING && t->noboot) {
6548 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6553 /* Some tracers are only allowed for the top level buffer */
6554 if (!trace_ok_for_array(t, tr)) {
6559 /* If trace pipe files are being read, we can't change the tracer */
6560 if (tr->trace_ref) {
6565 trace_branch_disable();
6567 tr->current_trace->enabled--;
6569 if (tr->current_trace->reset)
6570 tr->current_trace->reset(tr);
6572 #ifdef CONFIG_TRACER_MAX_TRACE
6573 had_max_tr = tr->current_trace->use_max_tr;
6575 /* Current trace needs to be nop_trace before synchronize_rcu */
6576 tr->current_trace = &nop_trace;
6578 if (had_max_tr && !t->use_max_tr) {
6580 * We need to make sure that the update_max_tr sees that
6581 * current_trace changed to nop_trace to keep it from
6582 * swapping the buffers after we resize it.
6583 * The update_max_tr is called with interrupts disabled,
6584 * so a synchronize_rcu() is sufficient.
6590 if (t->use_max_tr && !tr->allocated_snapshot) {
6591 ret = tracing_alloc_snapshot_instance(tr);
6596 tr->current_trace = &nop_trace;
6600 ret = tracer_init(t, tr);
6605 tr->current_trace = t;
6606 tr->current_trace->enabled++;
6607 trace_branch_enable(tr);
6609 mutex_unlock(&trace_types_lock);
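/*
 * Illustrative userspace sketch (not part of this file): selecting a
 * tracer, equivalent to "echo function > current_tracer" from a shell.
 * Needs <fcntl.h> and <unistd.h>:
 *
 *	int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "function", 8);
 *		close(fd);
 *	}
 */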
6615 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6616 size_t cnt, loff_t *ppos)
6618 struct trace_array *tr = filp->private_data;
6619 char buf[MAX_TRACER_SIZE+1];
6626 if (cnt > MAX_TRACER_SIZE)
6627 cnt = MAX_TRACER_SIZE;
6629 if (copy_from_user(buf, ubuf, cnt))
6636 err = tracing_set_tracer(tr, name);
6646 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6647 size_t cnt, loff_t *ppos)
6652 r = snprintf(buf, sizeof(buf), "%ld\n",
6653 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6654 if (r > sizeof(buf))
6656 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6660 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6661 size_t cnt, loff_t *ppos)
6666 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6676 tracing_thresh_read(struct file *filp, char __user *ubuf,
6677 size_t cnt, loff_t *ppos)
6679 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6683 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6684 size_t cnt, loff_t *ppos)
6686 struct trace_array *tr = filp->private_data;
6689 mutex_lock(&trace_types_lock);
6690 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6694 if (tr->current_trace->update_thresh) {
6695 ret = tr->current_trace->update_thresh(tr);
6702 mutex_unlock(&trace_types_lock);
6707 #ifdef CONFIG_TRACER_MAX_TRACE
6710 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6711 size_t cnt, loff_t *ppos)
6713 struct trace_array *tr = filp->private_data;
6715 return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos);
6719 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6720 size_t cnt, loff_t *ppos)
6722 struct trace_array *tr = filp->private_data;
6724 return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos);
6729 static int open_pipe_on_cpu(struct trace_array *tr, int cpu)
6731 if (cpu == RING_BUFFER_ALL_CPUS) {
6732 if (cpumask_empty(tr->pipe_cpumask)) {
6733 cpumask_setall(tr->pipe_cpumask);
6736 } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) {
6737 cpumask_set_cpu(cpu, tr->pipe_cpumask);
6743 static void close_pipe_on_cpu(struct trace_array *tr, int cpu)
6745 if (cpu == RING_BUFFER_ALL_CPUS) {
6746 WARN_ON(!cpumask_full(tr->pipe_cpumask));
6747 cpumask_clear(tr->pipe_cpumask);
6749 WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask));
6750 cpumask_clear_cpu(cpu, tr->pipe_cpumask);
6754 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6756 struct trace_array *tr = inode->i_private;
6757 struct trace_iterator *iter;
6761 ret = tracing_check_open_get_tr(tr);
6765 mutex_lock(&trace_types_lock);
6766 cpu = tracing_get_cpu(inode);
6767 ret = open_pipe_on_cpu(tr, cpu);
6769 goto fail_pipe_on_cpu;
6771 /* create a buffer to store the information to pass to userspace */
6772 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6775 goto fail_alloc_iter;
6778 trace_seq_init(&iter->seq);
6779 iter->trace = tr->current_trace;
6781 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6786 /* trace pipe does not show start of buffer */
6787 cpumask_setall(iter->started);
6789 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6790 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6792 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6793 if (trace_clocks[tr->clock_id].in_ns)
6794 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6797 iter->array_buffer = &tr->array_buffer;
6798 iter->cpu_file = cpu;
6799 mutex_init(&iter->mutex);
6800 filp->private_data = iter;
6802 if (iter->trace->pipe_open)
6803 iter->trace->pipe_open(iter);
6805 nonseekable_open(inode, filp);
6809 mutex_unlock(&trace_types_lock);
6815 close_pipe_on_cpu(tr, cpu);
6817 __trace_array_put(tr);
6818 mutex_unlock(&trace_types_lock);
6822 static int tracing_release_pipe(struct inode *inode, struct file *file)
6824 struct trace_iterator *iter = file->private_data;
6825 struct trace_array *tr = inode->i_private;
6827 mutex_lock(&trace_types_lock);
6831 if (iter->trace->pipe_close)
6832 iter->trace->pipe_close(iter);
6833 close_pipe_on_cpu(tr, iter->cpu_file);
6834 mutex_unlock(&trace_types_lock);
6836 free_trace_iter_content(iter);
6839 trace_array_put(tr);
6845 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6847 struct trace_array *tr = iter->tr;
6849 /* Iterators are static; they should be filled or empty */
6850 if (trace_buffer_iter(iter, iter->cpu_file))
6851 return EPOLLIN | EPOLLRDNORM;
6853 if (tr->trace_flags & TRACE_ITER_BLOCK)
6855 * Always select as readable when in blocking mode
6857 return EPOLLIN | EPOLLRDNORM;
6859 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6860 filp, poll_table, iter->tr->buffer_percent);
6864 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6866 struct trace_iterator *iter = filp->private_data;
6868 return trace_poll(iter, filp, poll_table);
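/*
 * Illustrative userspace sketch (not part of this file): using poll(2)
 * to wait for data before doing a consuming read of trace_pipe. Needs
 * <fcntl.h>, <poll.h> and <unistd.h>:
 *
 *	struct pollfd pfd = {
 *		.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY),
 *		.events = POLLIN,
 *	};
 *	char buf[4096];
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(pfd.fd, buf, sizeof(buf));
 */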
6871 /* Must be called with iter->mutex held. */
6872 static int tracing_wait_pipe(struct file *filp)
6874 struct trace_iterator *iter = filp->private_data;
6877 while (trace_empty(iter)) {
6879 if ((filp->f_flags & O_NONBLOCK)) {
6884 * We stop blocking once we have read something and tracing is disabled.
6885 * We still block if tracing is disabled, but we have never
6886 * read anything. This allows a user to cat this file, and
6887 * then enable tracing. But after we have read something,
6888 * we give an EOF when tracing is again disabled.
6890 * iter->pos will be 0 if we haven't read anything.
6892 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6895 mutex_unlock(&iter->mutex);
6897 ret = wait_on_pipe(iter, 0);
6899 mutex_lock(&iter->mutex);
6912 tracing_read_pipe(struct file *filp, char __user *ubuf,
6913 size_t cnt, loff_t *ppos)
6915 struct trace_iterator *iter = filp->private_data;
6919 * Avoid more than one consumer on a single file descriptor
6920 * This is just a matter of trace coherency; the ring buffer itself
6923 mutex_lock(&iter->mutex);
6925 /* return any leftover data */
6926 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6930 trace_seq_init(&iter->seq);
6932 if (iter->trace->read) {
6933 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6939 sret = tracing_wait_pipe(filp);
6943 /* stop when tracing is finished */
6944 if (trace_empty(iter)) {
6949 if (cnt >= PAGE_SIZE)
6950 cnt = PAGE_SIZE - 1;
6952 /* reset all but tr, trace, and overruns */
6953 trace_iterator_reset(iter);
6954 cpumask_clear(iter->started);
6955 trace_seq_init(&iter->seq);
6957 trace_event_read_lock();
6958 trace_access_lock(iter->cpu_file);
6959 while (trace_find_next_entry_inc(iter) != NULL) {
6960 enum print_line_t ret;
6961 int save_len = iter->seq.seq.len;
6963 ret = print_trace_line(iter);
6964 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6966 * If one print_trace_line() fills the entire trace_seq in one shot,
6967 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6968 * In this case, we need to consume it; otherwise, the loop will peek at
6969 * this event next time, resulting in an infinite loop.
6971 if (save_len == 0) {
6973 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6974 trace_consume(iter);
6978 /* In other cases, don't print partial lines */
6979 iter->seq.seq.len = save_len;
6982 if (ret != TRACE_TYPE_NO_CONSUME)
6983 trace_consume(iter);
6985 if (trace_seq_used(&iter->seq) >= cnt)
6989 * Setting the full flag means we reached the trace_seq buffer
6990 * size and we should have left via the partial-output condition above.
6991 * One of the trace_seq_* functions is not used properly.
6993 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6996 trace_access_unlock(iter->cpu_file);
6997 trace_event_read_unlock();
6999 /* Now copy what we have to the user */
7000 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
7001 if (iter->seq.readpos >= trace_seq_used(&iter->seq))
7002 trace_seq_init(&iter->seq);
7005 * If there was nothing to send to user, in spite of consuming trace
7006 * entries, go back to wait for more entries.
7012 mutex_unlock(&iter->mutex);
7017 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
7020 __free_page(spd->pages[idx]);
7024 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
7030 /* Seq buffer is page-sized, exactly what we need. */
7032 save_len = iter->seq.seq.len;
7033 ret = print_trace_line(iter);
7035 if (trace_seq_has_overflowed(&iter->seq)) {
7036 iter->seq.seq.len = save_len;
7041 * This should not be hit, because it should only
7042 * be set if the iter->seq overflowed. But check it
7043 * anyway to be safe.
7045 if (ret == TRACE_TYPE_PARTIAL_LINE) {
7046 iter->seq.seq.len = save_len;
7050 count = trace_seq_used(&iter->seq) - save_len;
7053 iter->seq.seq.len = save_len;
7057 if (ret != TRACE_TYPE_NO_CONSUME)
7058 trace_consume(iter);
7060 if (!trace_find_next_entry_inc(iter)) {
7070 static ssize_t tracing_splice_read_pipe(struct file *filp,
7072 struct pipe_inode_info *pipe,
7076 struct page *pages_def[PIPE_DEF_BUFFERS];
7077 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7078 struct trace_iterator *iter = filp->private_data;
7079 struct splice_pipe_desc spd = {
7081 .partial = partial_def,
7082 .nr_pages = 0, /* This gets updated below. */
7083 .nr_pages_max = PIPE_DEF_BUFFERS,
7084 .ops = &default_pipe_buf_ops,
7085 .spd_release = tracing_spd_release_pipe,
7091 if (splice_grow_spd(pipe, &spd))
7094 mutex_lock(&iter->mutex);
7096 if (iter->trace->splice_read) {
7097 ret = iter->trace->splice_read(iter, filp,
7098 ppos, pipe, len, flags);
7103 ret = tracing_wait_pipe(filp);
7107 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
7112 trace_event_read_lock();
7113 trace_access_lock(iter->cpu_file);
7115 /* Fill as many pages as possible. */
7116 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
7117 spd.pages[i] = alloc_page(GFP_KERNEL);
7121 rem = tracing_fill_pipe_page(rem, iter);
7123 /* Copy the data into the page, so we can start over. */
7124 ret = trace_seq_to_buffer(&iter->seq,
7125 page_address(spd.pages[i]),
7126 trace_seq_used(&iter->seq));
7128 __free_page(spd.pages[i]);
7131 spd.partial[i].offset = 0;
7132 spd.partial[i].len = trace_seq_used(&iter->seq);
7134 trace_seq_init(&iter->seq);
7137 trace_access_unlock(iter->cpu_file);
7138 trace_event_read_unlock();
7139 mutex_unlock(&iter->mutex);
7144 ret = splice_to_pipe(pipe, &spd);
7148 splice_shrink_spd(&spd);
7152 mutex_unlock(&iter->mutex);
7157 tracing_entries_read(struct file *filp, char __user *ubuf,
7158 size_t cnt, loff_t *ppos)
7160 struct inode *inode = file_inode(filp);
7161 struct trace_array *tr = inode->i_private;
7162 int cpu = tracing_get_cpu(inode);
7167 mutex_lock(&trace_types_lock);
7169 if (cpu == RING_BUFFER_ALL_CPUS) {
7170 int cpu, buf_size_same;
7175 /* check if all cpu sizes are the same */
7176 for_each_tracing_cpu(cpu) {
7177 /* fill in the size from first enabled cpu */
7179 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7180 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7186 if (buf_size_same) {
7187 if (!tr->ring_buffer_expanded)
7188 r = sprintf(buf, "%lu (expanded: %lu)\n",
7190 trace_buf_size >> 10);
7192 r = sprintf(buf, "%lu\n", size >> 10);
7194 r = sprintf(buf, "X\n");
7196 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7198 mutex_unlock(&trace_types_lock);
7200 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7205 tracing_entries_write(struct file *filp, const char __user *ubuf,
7206 size_t cnt, loff_t *ppos)
7208 struct inode *inode = file_inode(filp);
7209 struct trace_array *tr = inode->i_private;
7213 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7217 /* must have at least 1 entry */
7221 /* value is in KB */
7223 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
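/*
 * Example: resizing the ring buffer from userspace (a minimal sketch;
 * assumes tracefs is mounted at /sys/kernel/tracing, and the 1024 KB
 * per-CPU size is illustrative):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int resize_buffers(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, "1024", 4) != 4) {	// value is in KB
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */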
7233 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7234 size_t cnt, loff_t *ppos)
7236 struct trace_array *tr = filp->private_data;
7239 unsigned long size = 0, expanded_size = 0;
7241 mutex_lock(&trace_types_lock);
7242 for_each_tracing_cpu(cpu) {
7243 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7244 if (!tr->ring_buffer_expanded)
7245 expanded_size += trace_buf_size >> 10;
7247 if (tr->ring_buffer_expanded)
7248 r = sprintf(buf, "%lu\n", size);
7250 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7251 mutex_unlock(&trace_types_lock);
7253 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7257 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7258 size_t cnt, loff_t *ppos)
7261 * There is no need to read what the user has written; this function
7262 * just makes sure that there is no error when "echo" is used.
7271 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7273 struct trace_array *tr = inode->i_private;
7275 /* Disable tracing? */
7276 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7277 tracer_tracing_off(tr);
7278 /* resize the ring buffer to 0 */
7279 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7281 trace_array_put(tr);
7287 tracing_mark_write(struct file *filp, const char __user *ubuf,
7288 size_t cnt, loff_t *fpos)
7290 struct trace_array *tr = filp->private_data;
7291 struct ring_buffer_event *event;
7292 enum event_trigger_type tt = ETT_NONE;
7293 struct trace_buffer *buffer;
7294 struct print_entry *entry;
7299 /* Used in tracing_mark_raw_write() as well */
7300 #define FAULTED_STR "<faulted>"
7301 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7303 if (tracing_disabled)
7306 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7309 if (cnt > TRACE_BUF_SIZE)
7310 cnt = TRACE_BUF_SIZE;
7312 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7314 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7316 /* If shorter than "<faulted>", make sure we can still add that string */
7317 if (cnt < FAULTED_SIZE)
7318 size += FAULTED_SIZE - cnt;
7320 buffer = tr->array_buffer.buffer;
7321 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7323 if (unlikely(!event))
7324 /* Ring buffer disabled, return as if not open for write */
7327 entry = ring_buffer_event_data(event);
7328 entry->ip = _THIS_IP_;
7330 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7332 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7338 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7339 /* do not add \n before testing triggers, but add \0 */
7340 entry->buf[cnt] = '\0';
7341 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7344 if (entry->buf[cnt - 1] != '\n') {
7345 entry->buf[cnt] = '\n';
7346 entry->buf[cnt + 1] = '\0';
7348 entry->buf[cnt] = '\0';
7350 if (static_branch_unlikely(&trace_marker_exports_enabled))
7351 ftrace_exports(event, TRACE_EXPORT_MARKER);
7352 __buffer_unlock_commit(buffer, event);
7355 event_triggers_post_call(tr->trace_marker_file, tt);
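/*
 * Example: emitting a marker from userspace (a minimal sketch; assumes
 * tracefs is mounted at /sys/kernel/tracing). Each write() becomes one
 * TRACE_PRINT event, and a '\n' is appended when the payload lacks one:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	void mark(const char *msg)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, msg, strlen(msg));
 *			close(fd);
 *		}
 *	}
 */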
7360 /* Limit it for now to 3K (including tag) */
7361 #define RAW_DATA_MAX_SIZE (1024*3)
7364 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7365 size_t cnt, loff_t *fpos)
7367 struct trace_array *tr = filp->private_data;
7368 struct ring_buffer_event *event;
7369 struct trace_buffer *buffer;
7370 struct raw_data_entry *entry;
7375 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7377 if (tracing_disabled)
7380 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7383 /* The marker must at least have a tag id */
7384 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7387 if (cnt > TRACE_BUF_SIZE)
7388 cnt = TRACE_BUF_SIZE;
7390 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7392 size = sizeof(*entry) + cnt;
7393 if (cnt < FAULT_SIZE_ID)
7394 size += FAULT_SIZE_ID - cnt;
7396 buffer = tr->array_buffer.buffer;
7397 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7400 /* Ring buffer disabled, return as if not open for write */
7403 entry = ring_buffer_event_data(event);
7405 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7408 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7413 __buffer_unlock_commit(buffer, event);
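/*
 * Example: writing a raw marker from userspace (a minimal sketch; the
 * tag id 42 and the payload are illustrative). The write must start
 * with a 4-byte tag id and may carry at most RAW_DATA_MAX_SIZE bytes
 * in total:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	struct raw_marker {
 *		unsigned int	id;
 *		char		data[8];
 *	};
 *
 *	void mark_raw(void)
 *	{
 *		struct raw_marker m = { .id = 42, .data = "payload" };
 *		int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *		if (fd >= 0) {
 *			write(fd, &m, sizeof(m));
 *			close(fd);
 *		}
 *	}
 */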
7418 static int tracing_clock_show(struct seq_file *m, void *v)
7420 struct trace_array *tr = m->private;
7423 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7425 "%s%s%s%s", i ? " " : "",
7426 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7427 i == tr->clock_id ? "]" : "");
7433 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7437 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7438 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7441 if (i == ARRAY_SIZE(trace_clocks))
7444 mutex_lock(&trace_types_lock);
7448 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7451 * The new clock may not be consistent with the previous clock.
7452 * Reset the buffer so that it doesn't have incomparable timestamps.
7454 tracing_reset_online_cpus(&tr->array_buffer);
7456 #ifdef CONFIG_TRACER_MAX_TRACE
7457 if (tr->max_buffer.buffer)
7458 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7459 tracing_reset_online_cpus(&tr->max_buffer);
7462 mutex_unlock(&trace_types_lock);
7467 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7468 size_t cnt, loff_t *fpos)
7470 struct seq_file *m = filp->private_data;
7471 struct trace_array *tr = m->private;
7473 const char *clockstr;
7476 if (cnt >= sizeof(buf))
7479 if (copy_from_user(buf, ubuf, cnt))
7484 clockstr = strstrip(buf);
7486 ret = tracing_set_clock(tr, clockstr);
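/*
 * Example: selecting a trace clock from the shell. Reading the file
 * lists the available clocks with the current one in brackets (the
 * exact list depends on the kernel; output abridged):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter uptime perf mono mono_raw boot
 *	# echo mono > /sys/kernel/tracing/trace_clock
 */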
7495 static int tracing_clock_open(struct inode *inode, struct file *file)
7497 struct trace_array *tr = inode->i_private;
7500 ret = tracing_check_open_get_tr(tr);
7504 ret = single_open(file, tracing_clock_show, inode->i_private);
7506 trace_array_put(tr);
7511 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7513 struct trace_array *tr = m->private;
7515 mutex_lock(&trace_types_lock);
7517 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7518 seq_puts(m, "delta [absolute]\n");
7520 seq_puts(m, "[delta] absolute\n");
7522 mutex_unlock(&trace_types_lock);
7527 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7529 struct trace_array *tr = inode->i_private;
7532 ret = tracing_check_open_get_tr(tr);
7536 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7538 trace_array_put(tr);
7543 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7545 if (rbe == this_cpu_read(trace_buffered_event))
7546 return ring_buffer_time_stamp(buffer);
7548 return ring_buffer_event_time_stamp(buffer, rbe);
7552 * Set or disable using the per-CPU trace_buffered_event when possible.
7554 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7558 mutex_lock(&trace_types_lock);
7560 if (set && tr->no_filter_buffering_ref++)
7564 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7569 --tr->no_filter_buffering_ref;
7572 mutex_unlock(&trace_types_lock);
7577 struct ftrace_buffer_info {
7578 struct trace_iterator iter;
7580 unsigned int spare_cpu;
7584 #ifdef CONFIG_TRACER_SNAPSHOT
7585 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7587 struct trace_array *tr = inode->i_private;
7588 struct trace_iterator *iter;
7592 ret = tracing_check_open_get_tr(tr);
7596 if (file->f_mode & FMODE_READ) {
7597 iter = __tracing_open(inode, file, true);
7599 ret = PTR_ERR(iter);
7601 /* Writes still need the seq_file to hold the private data */
7603 m = kzalloc(sizeof(*m), GFP_KERNEL);
7606 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7614 iter->array_buffer = &tr->max_buffer;
7615 iter->cpu_file = tracing_get_cpu(inode);
7617 file->private_data = m;
7621 trace_array_put(tr);
7626 static void tracing_swap_cpu_buffer(void *tr)
7628 update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
7632 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7635 struct seq_file *m = filp->private_data;
7636 struct trace_iterator *iter = m->private;
7637 struct trace_array *tr = iter->tr;
7641 ret = tracing_update_buffers(tr);
7645 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7649 mutex_lock(&trace_types_lock);
7651 if (tr->current_trace->use_max_tr) {
7656 local_irq_disable();
7657 arch_spin_lock(&tr->max_lock);
7658 if (tr->cond_snapshot)
7660 arch_spin_unlock(&tr->max_lock);
7667 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7671 if (tr->allocated_snapshot)
7675 /* Only allow per-cpu swap if the ring buffer supports it */
7676 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7677 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7682 if (tr->allocated_snapshot)
7683 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7684 &tr->array_buffer, iter->cpu_file);
7686 ret = tracing_alloc_snapshot_instance(tr);
7689 /* Now, we're going to swap */
7690 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7691 local_irq_disable();
7692 update_max_tr(tr, current, smp_processor_id(), NULL);
7695 smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
7700 if (tr->allocated_snapshot) {
7701 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7702 tracing_reset_online_cpus(&tr->max_buffer);
7704 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7714 mutex_unlock(&trace_types_lock);
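/*
 * Example: driving the snapshot file from the shell (see
 * Documentation/trace/ftrace.rst): "1" allocates the snapshot buffer
 * if needed and swaps it with the live buffer, "0" frees it, and "2"
 * clears its contents without freeing:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot
 *	# cat /sys/kernel/tracing/snapshot
 *	# echo 0 > /sys/kernel/tracing/snapshot
 */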
7718 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7720 struct seq_file *m = file->private_data;
7723 ret = tracing_release(inode, file);
7725 if (file->f_mode & FMODE_READ)
7728 /* If write only, the seq_file is just a stub */
7736 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7737 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7738 size_t count, loff_t *ppos);
7739 static int tracing_buffers_release(struct inode *inode, struct file *file);
7740 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7741 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7743 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7745 struct ftrace_buffer_info *info;
7748 /* The following checks for tracefs lockdown */
7749 ret = tracing_buffers_open(inode, filp);
7753 info = filp->private_data;
7755 if (info->iter.trace->use_max_tr) {
7756 tracing_buffers_release(inode, filp);
7760 info->iter.snapshot = true;
7761 info->iter.array_buffer = &info->iter.tr->max_buffer;
7766 #endif /* CONFIG_TRACER_SNAPSHOT */
7769 static const struct file_operations tracing_thresh_fops = {
7770 .open = tracing_open_generic,
7771 .read = tracing_thresh_read,
7772 .write = tracing_thresh_write,
7773 .llseek = generic_file_llseek,
7776 #ifdef CONFIG_TRACER_MAX_TRACE
7777 static const struct file_operations tracing_max_lat_fops = {
7778 .open = tracing_open_generic_tr,
7779 .read = tracing_max_lat_read,
7780 .write = tracing_max_lat_write,
7781 .llseek = generic_file_llseek,
7782 .release = tracing_release_generic_tr,
7786 static const struct file_operations set_tracer_fops = {
7787 .open = tracing_open_generic_tr,
7788 .read = tracing_set_trace_read,
7789 .write = tracing_set_trace_write,
7790 .llseek = generic_file_llseek,
7791 .release = tracing_release_generic_tr,
7794 static const struct file_operations tracing_pipe_fops = {
7795 .open = tracing_open_pipe,
7796 .poll = tracing_poll_pipe,
7797 .read = tracing_read_pipe,
7798 .splice_read = tracing_splice_read_pipe,
7799 .release = tracing_release_pipe,
7800 .llseek = no_llseek,
7803 static const struct file_operations tracing_entries_fops = {
7804 .open = tracing_open_generic_tr,
7805 .read = tracing_entries_read,
7806 .write = tracing_entries_write,
7807 .llseek = generic_file_llseek,
7808 .release = tracing_release_generic_tr,
7811 static const struct file_operations tracing_total_entries_fops = {
7812 .open = tracing_open_generic_tr,
7813 .read = tracing_total_entries_read,
7814 .llseek = generic_file_llseek,
7815 .release = tracing_release_generic_tr,
7818 static const struct file_operations tracing_free_buffer_fops = {
7819 .open = tracing_open_generic_tr,
7820 .write = tracing_free_buffer_write,
7821 .release = tracing_free_buffer_release,
7824 static const struct file_operations tracing_mark_fops = {
7825 .open = tracing_mark_open,
7826 .write = tracing_mark_write,
7827 .release = tracing_release_generic_tr,
7830 static const struct file_operations tracing_mark_raw_fops = {
7831 .open = tracing_mark_open,
7832 .write = tracing_mark_raw_write,
7833 .release = tracing_release_generic_tr,
7836 static const struct file_operations trace_clock_fops = {
7837 .open = tracing_clock_open,
7839 .llseek = seq_lseek,
7840 .release = tracing_single_release_tr,
7841 .write = tracing_clock_write,
7844 static const struct file_operations trace_time_stamp_mode_fops = {
7845 .open = tracing_time_stamp_mode_open,
7847 .llseek = seq_lseek,
7848 .release = tracing_single_release_tr,
7851 #ifdef CONFIG_TRACER_SNAPSHOT
7852 static const struct file_operations snapshot_fops = {
7853 .open = tracing_snapshot_open,
7855 .write = tracing_snapshot_write,
7856 .llseek = tracing_lseek,
7857 .release = tracing_snapshot_release,
7860 static const struct file_operations snapshot_raw_fops = {
7861 .open = snapshot_raw_open,
7862 .read = tracing_buffers_read,
7863 .release = tracing_buffers_release,
7864 .splice_read = tracing_buffers_splice_read,
7865 .llseek = no_llseek,
7868 #endif /* CONFIG_TRACER_SNAPSHOT */
7871 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7872 * @filp: The active open file structure
7873 * @ubuf: The userspace provided buffer holding the value to be written
7874 * @cnt: The maximum number of bytes to read
7875 * @ppos: The current "file" position
7877 * This function implements the write interface for a struct trace_min_max_param.
7878 * The filp->private_data must point to a trace_min_max_param structure that
7879 * defines where to write the value, the min and the max acceptable values,
7880 * and a lock to protect the write.
7883 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7885 struct trace_min_max_param *param = filp->private_data;
7892 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7897 mutex_lock(param->lock);
7899 if (param->min && val < *param->min)
7902 if (param->max && val > *param->max)
7909 mutex_unlock(param->lock);
7918 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7919 * @filp: The active open file structure
7920 * @ubuf: The userspace provided buffer to read value into
7921 * @cnt: The maximum number of bytes to read
7922 * @ppos: The current "file" position
7924 * This function implements the read interface for a struct trace_min_max_param.
7925 * The filp->private_data must point to a trace_min_max_param struct with valid data.
7929 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7931 struct trace_min_max_param *param = filp->private_data;
7932 char buf[U64_STR_SIZE];
7941 if (cnt > sizeof(buf))
7944 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7946 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7949 const struct file_operations trace_min_max_fops = {
7950 .open = tracing_open_generic,
7951 .read = trace_min_max_read,
7952 .write = trace_min_max_write,
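/*
 * Example: wiring a u64 tunable to these handlers (a hypothetical
 * sketch; "my_val", "my_max" and the "my_tunable" file are
 * illustrative, not an existing interface):
 *
 *	static u64 my_val;
 *	static u64 my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= NULL,		// no lower bound enforced
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_tunable", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */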
7955 #define TRACING_LOG_ERRS_MAX 8
7956 #define TRACING_LOG_LOC_MAX 128
7958 #define CMD_PREFIX " Command: "
7961 const char **errs; /* ptr to loc-specific array of err strings */
7962 u8 type; /* index into errs -> specific err string */
7963 u16 pos; /* caret position */
7967 struct tracing_log_err {
7968 struct list_head list;
7969 struct err_info info;
7970 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7971 char *cmd; /* what caused err */
7974 static DEFINE_MUTEX(tracing_err_log_lock);
7976 static struct tracing_log_err *alloc_tracing_log_err(int len)
7978 struct tracing_log_err *err;
7980 err = kzalloc(sizeof(*err), GFP_KERNEL);
7982 return ERR_PTR(-ENOMEM);
7984 err->cmd = kzalloc(len, GFP_KERNEL);
7987 return ERR_PTR(-ENOMEM);
7993 static void free_tracing_log_err(struct tracing_log_err *err)
7999 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
8002 struct tracing_log_err *err;
8005 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
8006 err = alloc_tracing_log_err(len);
8007 if (PTR_ERR(err) != -ENOMEM)
8008 tr->n_err_log_entries++;
8012 cmd = kzalloc(len, GFP_KERNEL);
8014 return ERR_PTR(-ENOMEM);
8015 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
8018 list_del(&err->list);
8024 * err_pos - find the position of a string within a command for error careting
8025 * @cmd: The tracing command that caused the error
8026 * @str: The string to position the caret at within @cmd
8028 * Finds the position of the first occurrence of @str within @cmd. The
8029 * return value can be passed to tracing_log_err() for caret placement
8032 * Returns the index within @cmd of the first occurrence of @str or 0
8033 * if @str was not found.
8035 unsigned int err_pos(char *cmd, const char *str)
8039 if (WARN_ON(!strlen(cmd)))
8042 found = strstr(cmd, str);
8050 * tracing_log_err - write an error to the tracing error log
8051 * @tr: The associated trace array for the error (NULL for top level array)
8052 * @loc: A string describing where the error occurred
8053 * @cmd: The tracing command that caused the error
8054 * @errs: The array of loc-specific static error strings
8055 * @type: The index into errs[], which produces the specific static err string
8056 * @pos: The position the caret should be placed in the cmd
8058 * Writes an error into tracing/error_log of the form:
8060 * <loc>: error: <text>
8064 * tracing/error_log is a small log file containing the last
8065 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
8066 * unless there has been a tracing error, and the error log can be
8067 * cleared and have its memory freed by writing the empty string in
8068 * truncation mode to it i.e. echo > tracing/error_log.
8070 * NOTE: the @errs array along with the @type param are used to
8071 * produce a static error string - this string is not copied and saved
8072 * when the error is logged - only a pointer to it is saved. See
8073 * existing callers for examples of how static strings are typically
8074 * defined for use with tracing_log_err().
8076 void tracing_log_err(struct trace_array *tr,
8077 const char *loc, const char *cmd,
8078 const char **errs, u8 type, u16 pos)
8080 struct tracing_log_err *err;
8086 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
8088 mutex_lock(&tracing_err_log_lock);
8089 err = get_tracing_log_err(tr, len);
8090 if (PTR_ERR(err) == -ENOMEM) {
8091 mutex_unlock(&tracing_err_log_lock);
8095 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
8096 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
8098 err->info.errs = errs;
8099 err->info.type = type;
8100 err->info.pos = pos;
8101 err->info.ts = local_clock();
8103 list_add_tail(&err->list, &tr->err_log);
8104 mutex_unlock(&tracing_err_log_lock);
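/*
 * Example: how a caller typically uses this together with err_pos() (a
 * hypothetical sketch modeled on existing users; the strings and the
 * "hist" location are illustrative):
 *
 *	static const char *my_errs[] = {
 *		"Unknown field",	// type 0
 *		"Bad operator",		// type 1
 *	};
 *
 *	static void report_bad_field(struct trace_array *tr, char *cmd,
 *				     const char *field)
 *	{
 *		tracing_log_err(tr, "hist", cmd, my_errs, 0,
 *				err_pos(cmd, field));
 *	}
 */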
8107 static void clear_tracing_err_log(struct trace_array *tr)
8109 struct tracing_log_err *err, *next;
8111 mutex_lock(&tracing_err_log_lock);
8112 list_for_each_entry_safe(err, next, &tr->err_log, list) {
8113 list_del(&err->list);
8114 free_tracing_log_err(err);
8117 tr->n_err_log_entries = 0;
8118 mutex_unlock(&tracing_err_log_lock);
8121 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
8123 struct trace_array *tr = m->private;
8125 mutex_lock(&tracing_err_log_lock);
8127 return seq_list_start(&tr->err_log, *pos);
8130 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
8132 struct trace_array *tr = m->private;
8134 return seq_list_next(v, &tr->err_log, pos);
8137 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
8139 mutex_unlock(&tracing_err_log_lock);
8142 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
8146 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
8148 for (i = 0; i < pos; i++)
8153 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
8155 struct tracing_log_err *err = v;
8158 const char *err_text = err->info.errs[err->info.type];
8159 u64 sec = err->info.ts;
8162 nsec = do_div(sec, NSEC_PER_SEC);
8163 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
8164 err->loc, err_text);
8165 seq_printf(m, "%s", err->cmd);
8166 tracing_err_log_show_pos(m, err->info.pos);
8172 static const struct seq_operations tracing_err_log_seq_ops = {
8173 .start = tracing_err_log_seq_start,
8174 .next = tracing_err_log_seq_next,
8175 .stop = tracing_err_log_seq_stop,
8176 .show = tracing_err_log_seq_show
8179 static int tracing_err_log_open(struct inode *inode, struct file *file)
8181 struct trace_array *tr = inode->i_private;
8184 ret = tracing_check_open_get_tr(tr);
8188 /* If this file was opened for write, then erase contents */
8189 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8190 clear_tracing_err_log(tr);
8192 if (file->f_mode & FMODE_READ) {
8193 ret = seq_open(file, &tracing_err_log_seq_ops);
8195 struct seq_file *m = file->private_data;
8198 trace_array_put(tr);
8204 static ssize_t tracing_err_log_write(struct file *file,
8205 const char __user *buffer,
8206 size_t count, loff_t *ppos)
8211 static int tracing_err_log_release(struct inode *inode, struct file *file)
8213 struct trace_array *tr = inode->i_private;
8215 trace_array_put(tr);
8217 if (file->f_mode & FMODE_READ)
8218 seq_release(inode, file);
8223 static const struct file_operations tracing_err_log_fops = {
8224 .open = tracing_err_log_open,
8225 .write = tracing_err_log_write,
8227 .llseek = tracing_lseek,
8228 .release = tracing_err_log_release,
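/*
 * Example: inspecting and clearing the error log from the shell (the
 * logged entry shown is illustrative):
 *
 *	# cat /sys/kernel/tracing/error_log
 *	[  123.456789] hist:sched:sched_switch: error: Unknown field
 *	  Command: keys=bogus_field
 *	                ^
 *	# echo > /sys/kernel/tracing/error_log
 */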
8231 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8233 struct trace_array *tr = inode->i_private;
8234 struct ftrace_buffer_info *info;
8237 ret = tracing_check_open_get_tr(tr);
8241 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8243 trace_array_put(tr);
8247 mutex_lock(&trace_types_lock);
8250 info->iter.cpu_file = tracing_get_cpu(inode);
8251 info->iter.trace = tr->current_trace;
8252 info->iter.array_buffer = &tr->array_buffer;
8254 /* Force reading ring buffer for first read */
8255 info->read = (unsigned int)-1;
8257 filp->private_data = info;
8261 mutex_unlock(&trace_types_lock);
8263 ret = nonseekable_open(inode, filp);
8265 trace_array_put(tr);
8271 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8273 struct ftrace_buffer_info *info = filp->private_data;
8274 struct trace_iterator *iter = &info->iter;
8276 return trace_poll(iter, filp, poll_table);
8280 tracing_buffers_read(struct file *filp, char __user *ubuf,
8281 size_t count, loff_t *ppos)
8283 struct ftrace_buffer_info *info = filp->private_data;
8284 struct trace_iterator *iter = &info->iter;
8291 #ifdef CONFIG_TRACER_MAX_TRACE
8292 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8297 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8299 if (IS_ERR(info->spare)) {
8300 ret = PTR_ERR(info->spare);
8303 info->spare_cpu = iter->cpu_file;
8309 /* Do we have previous read data to read? */
8310 if (info->read < PAGE_SIZE)
8314 trace_access_lock(iter->cpu_file);
8315 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8319 trace_access_unlock(iter->cpu_file);
8322 if (trace_empty(iter)) {
8323 if ((filp->f_flags & O_NONBLOCK))
8326 ret = wait_on_pipe(iter, 0);
8337 size = PAGE_SIZE - info->read;
8341 ret = copy_to_user(ubuf, info->spare + info->read, size);
8353 static int tracing_buffers_release(struct inode *inode, struct file *file)
8355 struct ftrace_buffer_info *info = file->private_data;
8356 struct trace_iterator *iter = &info->iter;
8358 mutex_lock(&trace_types_lock);
8360 iter->tr->trace_ref--;
8362 __trace_array_put(iter->tr);
8365 /* Make sure the waiters see the new wait_index */
8368 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8371 ring_buffer_free_read_page(iter->array_buffer->buffer,
8372 info->spare_cpu, info->spare);
8375 mutex_unlock(&trace_types_lock);
8381 struct trace_buffer *buffer;
8384 refcount_t refcount;
8387 static void buffer_ref_release(struct buffer_ref *ref)
8389 if (!refcount_dec_and_test(&ref->refcount))
8391 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8395 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8396 struct pipe_buffer *buf)
8398 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8400 buffer_ref_release(ref);
8404 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8405 struct pipe_buffer *buf)
8407 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8409 if (refcount_read(&ref->refcount) > INT_MAX/2)
8412 refcount_inc(&ref->refcount);
8416 /* Pipe buffer operations for a buffer. */
8417 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8418 .release = buffer_pipe_buf_release,
8419 .get = buffer_pipe_buf_get,
8423 * Callback from splice_to_pipe(), in case we need to release some pages
8424 * at the end of the spd because we errored out while filling the pipe.
8426 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8428 struct buffer_ref *ref =
8429 (struct buffer_ref *)spd->partial[i].private;
8431 buffer_ref_release(ref);
8432 spd->partial[i].private = 0;
8436 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8437 struct pipe_inode_info *pipe, size_t len,
8440 struct ftrace_buffer_info *info = file->private_data;
8441 struct trace_iterator *iter = &info->iter;
8442 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8443 struct page *pages_def[PIPE_DEF_BUFFERS];
8444 struct splice_pipe_desc spd = {
8446 .partial = partial_def,
8447 .nr_pages_max = PIPE_DEF_BUFFERS,
8448 .ops = &buffer_pipe_buf_ops,
8449 .spd_release = buffer_spd_release,
8451 struct buffer_ref *ref;
8455 #ifdef CONFIG_TRACER_MAX_TRACE
8456 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8460 if (*ppos & (PAGE_SIZE - 1))
8463 if (len & (PAGE_SIZE - 1)) {
8464 if (len < PAGE_SIZE)
8469 if (splice_grow_spd(pipe, &spd))
8473 trace_access_lock(iter->cpu_file);
8474 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8476 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8480 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8486 refcount_set(&ref->refcount, 1);
8487 ref->buffer = iter->array_buffer->buffer;
8488 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8489 if (IS_ERR(ref->page)) {
8490 ret = PTR_ERR(ref->page);
8495 ref->cpu = iter->cpu_file;
8497 r = ring_buffer_read_page(ref->buffer, &ref->page,
8498 len, iter->cpu_file, 1);
8500 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8506 page = virt_to_page(ref->page);
8508 spd.pages[i] = page;
8509 spd.partial[i].len = PAGE_SIZE;
8510 spd.partial[i].offset = 0;
8511 spd.partial[i].private = (unsigned long)ref;
8515 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8518 trace_access_unlock(iter->cpu_file);
8521 /* did we read anything? */
8522 if (!spd.nr_pages) {
8529 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8532 wait_index = READ_ONCE(iter->wait_index);
8534 ret = wait_on_pipe(iter, iter->snapshot ? 0 : iter->tr->buffer_percent);
8538 /* No need to wait after waking up when tracing is off */
8539 if (!tracer_tracing_is_on(iter->tr))
8542 /* Make sure we see the new wait_index */
8544 if (wait_index != iter->wait_index)
8550 ret = splice_to_pipe(pipe, &spd);
8552 splice_shrink_spd(&spd);
8557 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8558 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8560 struct ftrace_buffer_info *info = file->private_data;
8561 struct trace_iterator *iter = &info->iter;
8564 return -ENOIOCTLCMD;
8566 mutex_lock(&trace_types_lock);
8569 /* Make sure the waiters see the new wait_index */
8572 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8574 mutex_unlock(&trace_types_lock);
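/*
 * Example: draining a per-CPU buffer with splice() and then waking any
 * blocked readers with the ioctl (a minimal sketch; error handling is
 * elided, 4 KiB pages are assumed, and the cpu0 path is illustrative).
 * Both the offset and the length given to splice() must be multiples
 * of the page size:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *
 *	void drain_cpu0(int out_fd)
 *	{
 *		int p[2];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *			      O_RDONLY);
 *
 *		pipe(p);
 *		while ((n = splice(fd, NULL, p[1], NULL, 4096,
 *				   SPLICE_F_NONBLOCK)) > 0)
 *			splice(p[0], NULL, out_fd, NULL, n, 0);
 *		ioctl(fd, 0);	// cmd 0: wake up all waiters
 *		close(fd);
 *	}
 */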
8578 static const struct file_operations tracing_buffers_fops = {
8579 .open = tracing_buffers_open,
8580 .read = tracing_buffers_read,
8581 .poll = tracing_buffers_poll,
8582 .release = tracing_buffers_release,
8583 .splice_read = tracing_buffers_splice_read,
8584 .unlocked_ioctl = tracing_buffers_ioctl,
8585 .llseek = no_llseek,
8589 tracing_stats_read(struct file *filp, char __user *ubuf,
8590 size_t count, loff_t *ppos)
8592 struct inode *inode = file_inode(filp);
8593 struct trace_array *tr = inode->i_private;
8594 struct array_buffer *trace_buf = &tr->array_buffer;
8595 int cpu = tracing_get_cpu(inode);
8596 struct trace_seq *s;
8598 unsigned long long t;
8599 unsigned long usec_rem;
8601 s = kmalloc(sizeof(*s), GFP_KERNEL);
8607 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8608 trace_seq_printf(s, "entries: %ld\n", cnt);
8610 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8611 trace_seq_printf(s, "overrun: %ld\n", cnt);
8613 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8614 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8616 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8617 trace_seq_printf(s, "bytes: %ld\n", cnt);
8619 if (trace_clocks[tr->clock_id].in_ns) {
8620 /* local or global for trace_clock */
8621 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8622 usec_rem = do_div(t, USEC_PER_SEC);
8623 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8626 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8627 usec_rem = do_div(t, USEC_PER_SEC);
8628 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8630 /* counter or tsc mode for trace_clock */
8631 trace_seq_printf(s, "oldest event ts: %llu\n",
8632 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8634 trace_seq_printf(s, "now ts: %llu\n",
8635 ring_buffer_time_stamp(trace_buf->buffer));
8638 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8639 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8641 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8642 trace_seq_printf(s, "read events: %ld\n", cnt);
8644 count = simple_read_from_buffer(ubuf, count, ppos,
8645 s->buffer, trace_seq_used(s));
8652 static const struct file_operations tracing_stats_fops = {
8653 .open = tracing_open_generic_tr,
8654 .read = tracing_stats_read,
8655 .llseek = generic_file_llseek,
8656 .release = tracing_release_generic_tr,
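/*
 * Example: the fields emitted above, as seen from the shell (values
 * are illustrative):
 *
 *	# cat /sys/kernel/tracing/per_cpu/cpu0/stats
 *	entries: 411
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 22052
 *	oldest event ts:  1784.943983
 *	now ts:  1799.327473
 *	dropped events: 0
 *	read events: 0
 */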
8659 #ifdef CONFIG_DYNAMIC_FTRACE
8662 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8663 size_t cnt, loff_t *ppos)
8669 /* 256 should be plenty to hold the amount needed */
8670 buf = kmalloc(256, GFP_KERNEL);
8674 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8675 ftrace_update_tot_cnt,
8676 ftrace_number_of_pages,
8677 ftrace_number_of_groups);
8679 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8684 static const struct file_operations tracing_dyn_info_fops = {
8685 .open = tracing_open_generic,
8686 .read = tracing_read_dyn_info,
8687 .llseek = generic_file_llseek,
8689 #endif /* CONFIG_DYNAMIC_FTRACE */
8691 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8693 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8694 struct trace_array *tr, struct ftrace_probe_ops *ops,
8697 tracing_snapshot_instance(tr);
8701 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8702 struct trace_array *tr, struct ftrace_probe_ops *ops,
8705 struct ftrace_func_mapper *mapper = data;
8709 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8719 tracing_snapshot_instance(tr);
8723 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8724 struct ftrace_probe_ops *ops, void *data)
8726 struct ftrace_func_mapper *mapper = data;
8729 seq_printf(m, "%ps:", (void *)ip);
8731 seq_puts(m, "snapshot");
8734 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8737 seq_printf(m, ":count=%ld\n", *count);
8739 seq_puts(m, ":unlimited\n");
8745 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8746 unsigned long ip, void *init_data, void **data)
8748 struct ftrace_func_mapper *mapper = *data;
8751 mapper = allocate_ftrace_func_mapper();
8757 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8761 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8762 unsigned long ip, void *data)
8764 struct ftrace_func_mapper *mapper = data;
8769 free_ftrace_func_mapper(mapper, NULL);
8773 ftrace_func_mapper_remove_ip(mapper, ip);
8776 static struct ftrace_probe_ops snapshot_probe_ops = {
8777 .func = ftrace_snapshot,
8778 .print = ftrace_snapshot_print,
8781 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8782 .func = ftrace_count_snapshot,
8783 .print = ftrace_snapshot_print,
8784 .init = ftrace_snapshot_init,
8785 .free = ftrace_snapshot_free,
8789 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8790 char *glob, char *cmd, char *param, int enable)
8792 struct ftrace_probe_ops *ops;
8793 void *count = (void *)-1;
8800 /* hash funcs only work with set_ftrace_filter */
8804 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8807 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8812 number = strsep(¶m, ":");
8814 if (!strlen(number))
8818 * We use the callback data field (which is a pointer) as our counter.
8821 ret = kstrtoul(number, 0, (unsigned long *)&count);
8826 ret = tracing_alloc_snapshot_instance(tr);
8830 ret = register_ftrace_function_probe(glob, tr, ops, count);
8833 return ret < 0 ? ret : 0;
8836 static struct ftrace_func_command ftrace_snapshot_cmd = {
8838 .func = ftrace_trace_snapshot_callback,
8841 static __init int register_snapshot_cmd(void)
8843 return register_ftrace_command(&ftrace_snapshot_cmd);
8846 static inline __init int register_snapshot_cmd(void) { return 0; }
8847 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
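/*
 * Example: using the "snapshot" function command from the shell (the
 * schedule() target and the count of 5 are illustrative; a '!' prefix
 * removes the probe again):
 *
 *	# echo 'schedule:snapshot:5' > /sys/kernel/tracing/set_ftrace_filter
 *	# echo '!schedule:snapshot' >> /sys/kernel/tracing/set_ftrace_filter
 */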
8849 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8851 if (WARN_ON(!tr->dir))
8852 return ERR_PTR(-ENODEV);
8854 /* Top directory uses NULL as the parent */
8855 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8858 /* All sub buffers have a descriptor */
8862 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8864 struct dentry *d_tracer;
8867 return tr->percpu_dir;
8869 d_tracer = tracing_get_dentry(tr);
8870 if (IS_ERR(d_tracer))
8873 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8875 MEM_FAIL(!tr->percpu_dir,
8876 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8878 return tr->percpu_dir;
8881 static struct dentry *
8882 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8883 void *data, long cpu, const struct file_operations *fops)
8885 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8887 if (ret) /* See tracing_get_cpu() */
8888 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8893 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8895 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8896 struct dentry *d_cpu;
8897 char cpu_dir[30]; /* 30 characters should be more than enough */
8902 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8903 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8905 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8909 /* per cpu trace_pipe */
8910 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8911 tr, cpu, &tracing_pipe_fops);
8914 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8915 tr, cpu, &tracing_fops);
8917 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8918 tr, cpu, &tracing_buffers_fops);
8920 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8921 tr, cpu, &tracing_stats_fops);
8923 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8924 tr, cpu, &tracing_entries_fops);
8926 #ifdef CONFIG_TRACER_SNAPSHOT
8927 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8928 tr, cpu, &snapshot_fops);
8930 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8931 tr, cpu, &snapshot_raw_fops);
8935 #ifdef CONFIG_FTRACE_SELFTEST
8936 /* Let selftest have access to static functions in this file */
8937 #include "trace_selftest.c"
8941 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8944 struct trace_option_dentry *topt = filp->private_data;
8947 if (topt->flags->val & topt->opt->bit)
8952 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8956 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8959 struct trace_option_dentry *topt = filp->private_data;
8963 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8967 if (val != 0 && val != 1)
8970 if (!!(topt->flags->val & topt->opt->bit) != val) {
8971 mutex_lock(&trace_types_lock);
8972 ret = __set_tracer_option(topt->tr, topt->flags,
8974 mutex_unlock(&trace_types_lock);
8984 static int tracing_open_options(struct inode *inode, struct file *filp)
8986 struct trace_option_dentry *topt = inode->i_private;
8989 ret = tracing_check_open_get_tr(topt->tr);
8993 filp->private_data = inode->i_private;
8997 static int tracing_release_options(struct inode *inode, struct file *file)
8999 struct trace_option_dentry *topt = file->private_data;
9001 trace_array_put(topt->tr);
9005 static const struct file_operations trace_options_fops = {
9006 .open = tracing_open_options,
9007 .read = trace_options_read,
9008 .write = trace_options_write,
9009 .llseek = generic_file_llseek,
9010 .release = tracing_release_options,
9014 * In order to pass in both the trace_array descriptor as well as the index
9015 * to the flag that the trace option file represents, the trace_array
9016 * has a character array of trace_flags_index[], which holds the index
9017 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
9018 * The address of this character array is passed to the flag option file
9019 * read/write callbacks.
9021 * In order to extract both the index and the trace_array descriptor,
9022 * get_tr_index() uses the following algorithm.
9024 *   idx = *ptr;
9026 * As the pointer itself contains the address of the index (remember
9027 * index[0] == 0).
9029 * Then, to get the trace_array descriptor, subtract that index from the
9030 * ptr to get to the start of the index array itself:
9032 * ptr - idx == &index[0]
9034 * Then a simple container_of() from that pointer gets us to the
9035 * trace_array descriptor.
9037 static void get_tr_index(void *data, struct trace_array **ptr,
9038 unsigned int *pindex)
9040 *pindex = *(unsigned char *)data;
9042 *ptr = container_of(data - *pindex, struct trace_array,
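/*
 * The same trick in miniature (a standalone userspace sketch, not
 * kernel code; the "demo" names are illustrative):
 *
 *	#include <stddef.h>
 *
 *	struct demo {
 *		int		flags;
 *		unsigned char	index[8];	// index[i] == i
 *	};
 *
 *	static struct demo *demo_from_index(void *data, unsigned int *pindex)
 *	{
 *		*pindex = *(unsigned char *)data;
 *		// data - *pindex == &index[0]; back up to the struct
 *		return (struct demo *)((char *)data - *pindex -
 *				       offsetof(struct demo, index));
 *	}
 */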
9047 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
9050 void *tr_index = filp->private_data;
9051 struct trace_array *tr;
9055 get_tr_index(tr_index, &tr, &index);
9057 if (tr->trace_flags & (1 << index))
9062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
9066 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
9069 void *tr_index = filp->private_data;
9070 struct trace_array *tr;
9075 get_tr_index(tr_index, &tr, &index);
9077 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9081 if (val != 0 && val != 1)
9084 mutex_lock(&event_mutex);
9085 mutex_lock(&trace_types_lock);
9086 ret = set_tracer_flag(tr, 1 << index, val);
9087 mutex_unlock(&trace_types_lock);
9088 mutex_unlock(&event_mutex);
9098 static const struct file_operations trace_options_core_fops = {
9099 .open = tracing_open_generic,
9100 .read = trace_options_core_read,
9101 .write = trace_options_core_write,
9102 .llseek = generic_file_llseek,
9105 struct dentry *trace_create_file(const char *name,
9107 struct dentry *parent,
9109 const struct file_operations *fops)
9113 ret = tracefs_create_file(name, mode, parent, data, fops);
9115 pr_warn("Could not create tracefs '%s' entry\n", name);
9121 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
9123 struct dentry *d_tracer;
9128 d_tracer = tracing_get_dentry(tr);
9129 if (IS_ERR(d_tracer))
9132 tr->options = tracefs_create_dir("options", d_tracer);
9134 pr_warn("Could not create tracefs directory 'options'\n");
9142 create_trace_option_file(struct trace_array *tr,
9143 struct trace_option_dentry *topt,
9144 struct tracer_flags *flags,
9145 struct tracer_opt *opt)
9147 struct dentry *t_options;
9149 t_options = trace_options_init_dentry(tr);
9153 topt->flags = flags;
9157 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
9158 t_options, topt, &trace_options_fops);
9163 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
9165 struct trace_option_dentry *topts;
9166 struct trace_options *tr_topts;
9167 struct tracer_flags *flags;
9168 struct tracer_opt *opts;
9175 flags = tracer->flags;
9177 if (!flags || !flags->opts)
9181 * If this is an instance, only create flags for tracers
9182 * the instance may have.
9184 if (!trace_ok_for_array(tracer, tr))
9187 for (i = 0; i < tr->nr_topts; i++) {
9188 /* Make sure there are no duplicate flags. */
9189 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9195 for (cnt = 0; opts[cnt].name; cnt++)
9198 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9202 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9209 tr->topts = tr_topts;
9210 tr->topts[tr->nr_topts].tracer = tracer;
9211 tr->topts[tr->nr_topts].topts = topts;
9214 for (cnt = 0; opts[cnt].name; cnt++) {
9215 create_trace_option_file(tr, &topts[cnt], flags,
9217 MEM_FAIL(topts[cnt].entry == NULL,
9218 "Failed to create trace option: %s",
9223 static struct dentry *
9224 create_trace_option_core_file(struct trace_array *tr,
9225 const char *option, long index)
9227 struct dentry *t_options;
9229 t_options = trace_options_init_dentry(tr);
9233 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9234 (void *)&tr->trace_flags_index[index],
9235 &trace_options_core_fops);
9238 static void create_trace_options_dir(struct trace_array *tr)
9240 struct dentry *t_options;
9241 bool top_level = tr == &global_trace;
9244 t_options = trace_options_init_dentry(tr);
9248 for (i = 0; trace_options[i]; i++) {
9250 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9251 create_trace_option_core_file(tr, trace_options[i], i);
9256 rb_simple_read(struct file *filp, char __user *ubuf,
9257 size_t cnt, loff_t *ppos)
9259 struct trace_array *tr = filp->private_data;
9263 r = tracer_tracing_is_on(tr);
9264 r = sprintf(buf, "%d\n", r);
9266 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9270 rb_simple_write(struct file *filp, const char __user *ubuf,
9271 size_t cnt, loff_t *ppos)
9273 struct trace_array *tr = filp->private_data;
9274 struct trace_buffer *buffer = tr->array_buffer.buffer;
9278 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9283 mutex_lock(&trace_types_lock);
9284 if (!!val == tracer_tracing_is_on(tr)) {
9285 val = 0; /* do nothing */
9287 tracer_tracing_on(tr);
9288 if (tr->current_trace->start)
9289 tr->current_trace->start(tr);
9291 tracer_tracing_off(tr);
9292 if (tr->current_trace->stop)
9293 tr->current_trace->stop(tr);
9294 /* Wake up any waiters */
9295 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9297 mutex_unlock(&trace_types_lock);
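/*
 * Example: toggling the ring buffer from the shell (writing the
 * current state is a no-op, as checked above):
 *
 *	# echo 0 > /sys/kernel/tracing/tracing_on
 *	# ... run the workload ...
 *	# echo 1 > /sys/kernel/tracing/tracing_on
 */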
9305 static const struct file_operations rb_simple_fops = {
9306 .open = tracing_open_generic_tr,
9307 .read = rb_simple_read,
9308 .write = rb_simple_write,
9309 .release = tracing_release_generic_tr,
9310 .llseek = default_llseek,
9314 buffer_percent_read(struct file *filp, char __user *ubuf,
9315 size_t cnt, loff_t *ppos)
9317 struct trace_array *tr = filp->private_data;
9321 r = tr->buffer_percent;
9322 r = sprintf(buf, "%d\n", r);
9324 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9328 buffer_percent_write(struct file *filp, const char __user *ubuf,
9329 size_t cnt, loff_t *ppos)
9331 struct trace_array *tr = filp->private_data;
9335 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9342 tr->buffer_percent = val;
9349 static const struct file_operations buffer_percent_fops = {
9350 .open = tracing_open_generic_tr,
9351 .read = buffer_percent_read,
9352 .write = buffer_percent_write,
9353 .release = tracing_release_generic_tr,
9354 .llseek = default_llseek,
9357 static struct dentry *trace_instance_dir;
9360 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9363 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9365 enum ring_buffer_flags rb_flags;
9367 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9371 buf->buffer = ring_buffer_alloc(size, rb_flags);
9375 buf->data = alloc_percpu(struct trace_array_cpu);
9377 ring_buffer_free(buf->buffer);
9382 /* Allocate the first page for all buffers */
9383 set_buffer_entries(&tr->array_buffer,
9384 ring_buffer_size(tr->array_buffer.buffer, 0));
9389 static void free_trace_buffer(struct array_buffer *buf)
9392 ring_buffer_free(buf->buffer);
9394 free_percpu(buf->data);
9399 static int allocate_trace_buffers(struct trace_array *tr, int size)
9403 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9407 #ifdef CONFIG_TRACER_MAX_TRACE
9408 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9409 allocate_snapshot ? size : 1);
9410 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9411 free_trace_buffer(&tr->array_buffer);
9414 tr->allocated_snapshot = allocate_snapshot;
9416 allocate_snapshot = false;
9422 static void free_trace_buffers(struct trace_array *tr)
9427 free_trace_buffer(&tr->array_buffer);
9429 #ifdef CONFIG_TRACER_MAX_TRACE
9430 free_trace_buffer(&tr->max_buffer);
9434 static void init_trace_flags_index(struct trace_array *tr)
9438 /* Used by the trace options files */
9439 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9440 tr->trace_flags_index[i] = i;
9443 static void __update_tracer_options(struct trace_array *tr)
9447 for (t = trace_types; t; t = t->next)
9448 add_tracer_options(tr, t);
9451 static void update_tracer_options(struct trace_array *tr)
9453 mutex_lock(&trace_types_lock);
9454 tracer_options_updated = true;
9455 __update_tracer_options(tr);
9456 mutex_unlock(&trace_types_lock);
9459 /* Must have trace_types_lock held */
9460 struct trace_array *trace_array_find(const char *instance)
9462 struct trace_array *tr, *found = NULL;
9464 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9465 if (tr->name && strcmp(tr->name, instance) == 0) {
9474 struct trace_array *trace_array_find_get(const char *instance)
9476 struct trace_array *tr;
9478 mutex_lock(&trace_types_lock);
9479 tr = trace_array_find(instance);
9482 mutex_unlock(&trace_types_lock);
9487 static int trace_array_create_dir(struct trace_array *tr)
9491 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9495 ret = event_trace_add_tracer(tr->dir, tr);
9497 tracefs_remove(tr->dir);
9501 init_tracer_tracefs(tr, tr->dir);
9502 __update_tracer_options(tr);
9507 static struct trace_array *trace_array_create(const char *name)
9509 struct trace_array *tr;
9513 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9515 return ERR_PTR(ret);
9517 tr->name = kstrdup(name, GFP_KERNEL);
9521 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9524 if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
9527 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9529 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9531 raw_spin_lock_init(&tr->start_lock);
9533 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9535 tr->current_trace = &nop_trace;
9537 INIT_LIST_HEAD(&tr->systems);
9538 INIT_LIST_HEAD(&tr->events);
9539 INIT_LIST_HEAD(&tr->hist_vars);
9540 INIT_LIST_HEAD(&tr->err_log);
9542 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9545 /* The ring buffer is expanded by default */
9546 trace_set_ring_buffer_expanded(tr);
9548 if (ftrace_allocate_ftrace_ops(tr) < 0)
9551 ftrace_init_trace_array(tr);
9553 init_trace_flags_index(tr);
9555 if (trace_instance_dir) {
9556 ret = trace_array_create_dir(tr);
9560 __trace_early_add_events(tr);
9562 list_add(&tr->list, &ftrace_trace_arrays);
9569 ftrace_free_ftrace_ops(tr);
9570 free_trace_buffers(tr);
9571 free_cpumask_var(tr->pipe_cpumask);
9572 free_cpumask_var(tr->tracing_cpumask);
9576 return ERR_PTR(ret);
9579 static int instance_mkdir(const char *name)
9581 struct trace_array *tr;
9584 mutex_lock(&event_mutex);
9585 mutex_lock(&trace_types_lock);
9588 if (trace_array_find(name))
9591 tr = trace_array_create(name);
9593 ret = PTR_ERR_OR_ZERO(tr);
9596 mutex_unlock(&trace_types_lock);
9597 mutex_unlock(&event_mutex);
9602 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9603 * @name: The name of the trace array to be looked up/created.
9605 * Returns a pointer to the trace array with the given name, or NULL if
9606 * it cannot be created.
9608 * NOTE: This function increments the reference counter associated with the
9609 * trace array returned. This makes sure it cannot be freed while in use.
9610 * Use trace_array_put() once the trace array is no longer needed.
9611 * If the trace_array is to be freed, trace_array_destroy() needs to
9612 * be called after the trace_array_put(), or simply let user space delete
9613 * it from the tracefs instances directory. But until the
9614 * trace_array_put() is called, user space can not delete it.
9617 struct trace_array *trace_array_get_by_name(const char *name)
9619 struct trace_array *tr;
9621 mutex_lock(&event_mutex);
9622 mutex_lock(&trace_types_lock);
9624 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9625 if (tr->name && strcmp(tr->name, name) == 0)
9629 tr = trace_array_create(name);
9637 mutex_unlock(&trace_types_lock);
9638 mutex_unlock(&event_mutex);
9641 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
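/*
 * Example: a module creating and using its own instance (a minimal
 * sketch modeled on samples/ftrace/sample-trace-array.c; the instance
 * name is illustrative):
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	if (trace_array_init_printk(tr) == 0)
 *		trace_array_printk(tr, _THIS_IP_, "hello\n");
 *	// drop our reference; the instance itself stays around
 *	trace_array_put(tr);
 */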
9643 static int __remove_instance(struct trace_array *tr)
9647 /* Reference counter for a newly created trace array = 1. */
9648 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9651 list_del(&tr->list);
9653 /* Disable all the flags that were enabled coming in */
9654 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9655 if ((1 << i) & ZEROED_TRACE_FLAGS)
9656 set_tracer_flag(tr, 1 << i, 0);
9659 tracing_set_nop(tr);
9660 clear_ftrace_function_probes(tr);
9661 event_trace_del_tracer(tr);
9662 ftrace_clear_pids(tr);
9663 ftrace_destroy_function_files(tr);
9664 tracefs_remove(tr->dir);
9665 free_percpu(tr->last_func_repeats);
9666 free_trace_buffers(tr);
9667 clear_tracing_err_log(tr);
9669 for (i = 0; i < tr->nr_topts; i++) {
9670 kfree(tr->topts[i].topts);
9674 free_cpumask_var(tr->pipe_cpumask);
9675 free_cpumask_var(tr->tracing_cpumask);
9682 int trace_array_destroy(struct trace_array *this_tr)
9684 struct trace_array *tr;
9690 mutex_lock(&event_mutex);
9691 mutex_lock(&trace_types_lock);
9695 /* Make sure the trace array exists before destroying it. */
9696 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9697 if (tr == this_tr) {
9698 ret = __remove_instance(tr);
9703 mutex_unlock(&trace_types_lock);
9704 mutex_unlock(&event_mutex);
9708 EXPORT_SYMBOL_GPL(trace_array_destroy);
9710 static int instance_rmdir(const char *name)
9712 struct trace_array *tr;
9715 mutex_lock(&event_mutex);
9716 mutex_lock(&trace_types_lock);
9719 tr = trace_array_find(name);
9721 ret = __remove_instance(tr);
9723 mutex_unlock(&trace_types_lock);
9724 mutex_unlock(&event_mutex);
9729 static __init void create_trace_instances(struct dentry *d_tracer)
9731 struct trace_array *tr;
9733 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9736 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9739 mutex_lock(&event_mutex);
9740 mutex_lock(&trace_types_lock);
9742 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9745 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9746 "Failed to create instance directory\n"))
9750 mutex_unlock(&trace_types_lock);
9751 mutex_unlock(&event_mutex);
9755 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9759 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9760 tr, &show_traces_fops);
9762 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9763 tr, &set_tracer_fops);
9765 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9766 tr, &tracing_cpumask_fops);
9768 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9769 tr, &tracing_iter_fops);
9771 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9774 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9775 tr, &tracing_pipe_fops);
9777 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9778 tr, &tracing_entries_fops);
9780 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9781 tr, &tracing_total_entries_fops);
9783 trace_create_file("free_buffer", 0200, d_tracer,
9784 tr, &tracing_free_buffer_fops);
9786 trace_create_file("trace_marker", 0220, d_tracer,
9787 tr, &tracing_mark_fops);
9789 tr->trace_marker_file = __find_event_file(tr, "ftrace", "print");
9791 trace_create_file("trace_marker_raw", 0220, d_tracer,
9792 tr, &tracing_mark_raw_fops);
9794 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9797 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9798 tr, &rb_simple_fops);
9800 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9801 &trace_time_stamp_mode_fops);
9803 tr->buffer_percent = 50;
9805 trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer,
9806 tr, &buffer_percent_fops);
9808 create_trace_options_dir(tr);
9810 #ifdef CONFIG_TRACER_MAX_TRACE
9811 trace_create_maxlat_file(tr, d_tracer);
9814 if (ftrace_create_function_files(tr, d_tracer))
9815 MEM_FAIL(1, "Could not allocate function filter files");
9817 #ifdef CONFIG_TRACER_SNAPSHOT
9818 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9819 tr, &snapshot_fops);
9822 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9823 tr, &tracing_err_log_fops);
9825 for_each_tracing_cpu(cpu)
9826 tracing_init_tracefs_percpu(tr, cpu);
9828 ftrace_init_tracefs(tr, d_tracer);
9831 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9833 struct vfsmount *mnt;
9834 struct file_system_type *type;
9837 * To maintain backward compatibility for tools that mount
9838 * debugfs to get to the tracing facility, tracefs is automatically
9839 * mounted to the debugfs/tracing directory.
9841 type = get_fs_type("tracefs");
9844 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9845 put_filesystem(type);
9854 * tracing_init_dentry - initialize top level trace array
9856 * This is called when creating files or directories in the tracing
9857 * directory. It is called via fs_initcall() by any of the boot up code
9858 * and expects to return the dentry of the top level tracing directory.
9860 int tracing_init_dentry(void)
9862 struct trace_array *tr = &global_trace;
9864 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9865 pr_warn("Tracing disabled due to lockdown\n");
9869 /* The top level trace array uses NULL as parent */
9873 if (WARN_ON(!tracefs_initialized()))
9877 * As there may still be users that expect the tracing
9878 * files to exist in debugfs/tracing, we must automount
9879 * the tracefs file system there, so older tools still
9880 * work with the newer kernel.
9882 tr->dir = debugfs_create_automount("tracing", NULL,
9883 trace_automount, NULL);
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static struct workqueue_struct *eval_map_wq __initdata;
static struct work_struct eval_map_work __initdata;
static struct work_struct tracerfs_init_work __initdata;

static void __init eval_map_work_func(struct work_struct *work)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}

static int __init trace_eval_init(void)
{
	INIT_WORK(&eval_map_work, eval_map_work_func);

	eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
	if (!eval_map_wq) {
		pr_err("Unable to allocate eval_map_wq\n");
		/* Do work here */
		eval_map_work_func(&eval_map_work);
		return -ENOMEM;
	}

	queue_work(eval_map_wq, &eval_map_work);
	return 0;
}

subsys_initcall(trace_eval_init);

static int __init trace_eval_sync(void)
{
	/* Make sure the eval map updates are finished */
	if (eval_map_wq)
		destroy_workqueue(eval_map_wq);
	return 0;
}

late_initcall_sync(trace_eval_sync);
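
/*
 * Example (illustration only, not part of the kernel source): the eval
 * maps inserted above rewrite enum/sizeof symbols in the event "print
 * fmt" strings into their numeric values, so user-space parsers that
 * cannot resolve kernel enums can still decode __print_symbolic() and
 * __print_flags():
 *
 *	cat /sys/kernel/tracing/events/<system>/<event>/format
 *
 * shows numeric values where the TP_printk() source used enum names.
 */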
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}

#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
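
/*
 * Example (illustration only, not part of the kernel source): a module
 * hooks into the notifier above simply by exporting eval maps from its
 * trace header with TRACE_DEFINE_ENUM(). MY_STATE_RUNNING and my_event
 * are made-up names for this sketch:
 *
 *	TRACE_DEFINE_ENUM(MY_STATE_RUNNING);
 *
 *	TRACE_EVENT(my_event, ...,
 *		TP_printk("state=%s",
 *			  __print_symbolic(__entry->state,
 *					   { MY_STATE_RUNNING, "RUNNING" })));
 *
 * The maps are added at MODULE_STATE_COMING and, when
 * CONFIG_TRACE_EVAL_MAP_FILE is set, removed again at MODULE_STATE_GOING.
 */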
static __init void tracer_init_tracefs_work_func(struct work_struct *work)
{
	event_trace_init();

	init_tracer_tracefs(&global_trace, NULL);
	ftrace_init_tracefs_toplevel(&global_trace, NULL);

	trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", TRACE_MODE_READ, NULL,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
			NULL, &tracing_saved_tgids_fops);

	trace_create_eval_file(NULL);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
			NULL, &tracing_dyn_info_fops);
#endif

	create_trace_instances(NULL);

	update_tracer_options(&global_trace);
}

static __init int tracer_init_tracefs(void)
{
	int ret;

	trace_access_lock_init();

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	if (eval_map_wq) {
		INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
		queue_work(eval_map_wq, &tracerfs_init_work);
	} else {
		tracer_init_tracefs_work_func(NULL);
	}

	rv_init_interface();

	return 0;
}

fs_initcall(tracer_init_tracefs);
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused);

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_panic_handler,
	.priority = INT_MAX - 1,
};

/*
 * The idea is to execute the following die/panic callback early, in order
 * to avoid showing irrelevant information in the trace (like other panic
 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
 * warnings get disabled (to prevent potential log flooding).
 */
static int trace_die_panic_handler(struct notifier_block *self,
				unsigned long ev, void *unused)
{
	if (!ftrace_dump_on_oops)
		return NOTIFY_DONE;

	/* The die notifier requires DIE_OOPS to trigger */
	if (self == &trace_die_notifier && ev != DIE_OOPS)
		return NOTIFY_DONE;

	ftrace_dump(ftrace_dump_on_oops);

	return NOTIFY_DONE;
}
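
/*
 * Example (illustration only): the handler above is a no-op unless the
 * dump-on-oops facility was enabled, e.g. via the "ftrace_dump_on_oops"
 * kernel command line option or:
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */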
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
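
#if 0	/* Example only, not part of the kernel source */
/*
 * Since ftrace_dump() is exported, a module can dump the trace buffers
 * from its own fatal-error path. "my_driver_die" is a hypothetical
 * function used purely for this sketch.
 */
static void my_driver_die(void)
{
	/* Print every CPU's ring buffer to the console at KERN_TRACE level */
	ftrace_dump(DUMP_ALL);
}
#endif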
#define WRITE_BUFSIZE	4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');
			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
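
#if 0	/* Example only, not part of the kernel source */
/*
 * Sketch of how a dynamic-event file (kprobe_events and friends) plugs
 * into trace_parse_run_command(). "example_create" is a hypothetical
 * callback; it is handed one newline-terminated, comment-stripped
 * command at a time.
 */
static int example_create(const char *raw_command)
{
	pr_info("parsed command: %s\n", raw_command);
	return 0;
}

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create);
}
#endif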
#ifdef CONFIG_TRACER_MAX_TRACE
__init static bool tr_needs_alloc_snapshot(const char *name)
{
	char *test;
	int len = strlen(name);
	bool ret;

	if (!boot_snapshot_index)
		return false;

	if (strncmp(name, boot_snapshot_info, len) == 0 &&
	    boot_snapshot_info[len] == '\t')
		return true;

	test = kmalloc(strlen(name) + 3, GFP_KERNEL);
	if (!test)
		return false;

	/* Look for the name surrounded by tabs: "\tname\t" */
	sprintf(test, "\t%s\t", name);
	ret = strstr(boot_snapshot_info, test) != NULL;
	kfree(test);
	return ret;
}

__init static void do_allocate_snapshot(const char *name)
{
	if (!tr_needs_alloc_snapshot(name))
		return;

	/*
	 * When allocate_snapshot is set, the next call to
	 * allocate_trace_buffers() (called by trace_array_get_by_name())
	 * will allocate the snapshot buffer. That will also clear
	 * this flag.
	 */
	allocate_snapshot = true;
}
#else
static inline void do_allocate_snapshot(const char *name) { }
#endif
__init static void enable_instances(void)
{
	struct trace_array *tr;
	char *curr_str;
	char *str;
	char *tok;

	/* A tab is always appended */
	boot_instance_info[boot_instance_index - 1] = '\0';
	str = boot_instance_info;

	while ((curr_str = strsep(&str, "\t"))) {

		tok = strsep(&curr_str, ",");

		if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE))
			do_allocate_snapshot(tok);

		tr = trace_array_get_by_name(tok);
		if (!tr) {
			pr_warn("Failed to create instance buffer %s\n", curr_str);
			continue;
		}
		/* Allow user space to delete it */
		trace_array_put(tr);

		while ((tok = strsep(&curr_str, ","))) {
			early_enable_events(tr, tok, true);
		}
	}
}
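
/*
 * Example (illustration only): the loop above parses the
 * "trace_instance=" boot parameter, e.g.:
 *
 *	trace_instance=foo,sched:sched_switch,sched:sched_wakeup
 *
 * which creates instance "foo" before user space is up and enables the
 * two sched events inside it.
 */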
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (global_trace.ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocate some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
		goto out_free_savedcmd;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_pipe_cpumask;
	}
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_pipe_cpumask:
	free_cpumask_var(global_trace.pipe_cpumask);
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init ftrace_boot_snapshot(void)
{
#ifdef CONFIG_TRACER_MAX_TRACE
	struct trace_array *tr;

	if (!snapshot_at_boot)
		return;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (!tr->allocated_snapshot)
			continue;

		tracing_snapshot_instance(tr);
		trace_array_puts(tr, "** Boot snapshot taken **\n");
	}
#endif
}
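
/*
 * Example (illustration only): booting with "ftrace_boot_snapshot" on
 * the kernel command line makes this function snapshot each trace
 * instance that has a snapshot buffer once boot-up tracing is done;
 * the parameter can also name specific instances (see the exact syntax
 * in Documentation/admin-guide/kernel-parameters.txt).
 */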
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}

void __init trace_init(void)
{
	trace_event_init();

	if (boot_instance_index)
		enable_instances();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer lives in an init section.
	 * This function is called at late_initcall time; if the boot
	 * tracer was never registered, clear it out to prevent a
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif

__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);
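
/*
 * Example (illustration only): the default-clock switch above can be
 * inspected and overridden at run time through tracefs:
 *
 *	cat /sys/kernel/tracing/trace_clock
 *	echo global > /sys/kernel/tracing/trace_clock
 */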