1 // SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/panic_notifier.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer (such as trace_printk()) could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If boot-time tracing (including tracers/events set up via the kernel
 * command line) is running, we do not want to run the selftests.
 */
bool __read_mostly tracing_selftest_disabled;
76 #ifdef CONFIG_FTRACE_STARTUP_TEST
77 void __init disable_tracing_selftest(const char *reason)
79 if (!tracing_selftest_disabled) {
80 tracing_selftest_disabled = true;
81 pr_info("Ftrace startup test is disabled due to %s\n", reason);
86 /* Pipe tracepoints to printk */
87 static struct trace_iterator *tracepoint_print_iter;
88 int tracepoint_printk;
89 static bool tracepoint_printk_stop_on_boot __initdata;
90 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
92 /* For tracers that don't implement custom flags */
93 static struct tracer_opt dummy_tracer_opt[] = {
98 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
118 cpumask_var_t __read_mostly tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default. You can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 to dump the buffers of all CPUs.
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops.
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
138 /* When set, tracing will stop when a WARN*() is hit */
139 int __disable_trace_on_warning;
141 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
142 /* Map of enums to their values, for "eval_map" file */
143 struct trace_eval_map_head {
145 unsigned long length;
148 union trace_eval_map_item;
struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL, as it must be different
	 * from "mod" or "eval_string".
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};
159 static DEFINE_MUTEX(trace_eval_mutex);
/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
168 union trace_eval_map_item {
169 struct trace_eval_map map;
170 struct trace_eval_map_head head;
171 struct trace_eval_map_tail tail;
174 static union trace_eval_map_item *trace_eval_maps;
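/*
 * Illustrative layout of one saved array (a sketch drawn from the
 * comment above, not code from this file), with N maps saved:
 *
 *   trace_eval_maps -> [0]   head  (head.length == N, head.mod)
 *                      [1]   map   \
 *                      ...          > N trace_eval_map entries
 *                      [N]   map   /
 *                      [N+1] tail  (tail.next -> next saved array)
 */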
175 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
177 int tracing_set_tracer(struct trace_array *tr, const char *buf);
178 static void ftrace_trace_userstack(struct trace_array *tr,
179 struct trace_buffer *buffer,
180 unsigned int trace_ctx);
182 #define MAX_TRACER_SIZE 100
183 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
184 static char *default_bootup_tracer;
186 static bool allocate_snapshot;
187 static bool snapshot_at_boot;
189 static int __init set_cmdline_ftrace(char *str)
191 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
192 default_bootup_tracer = bootup_tracer_buf;
193 /* We are using ftrace early, expand it */
194 ring_buffer_expanded = true;
197 __setup("ftrace=", set_cmdline_ftrace);
199 static int __init set_ftrace_dump_on_oops(char *str)
201 if (*str++ != '=' || !*str || !strcmp("1", str)) {
202 ftrace_dump_on_oops = DUMP_ALL;
206 if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
207 ftrace_dump_on_oops = DUMP_ORIG;
213 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
215 static int __init stop_trace_on_warning(char *str)
	if (strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)
218 __disable_trace_on_warning = 1;
221 __setup("traceoff_on_warning", stop_trace_on_warning);
223 static int __init boot_alloc_snapshot(char *str)
225 allocate_snapshot = true;
226 /* We also need the main ring buffer expanded */
227 ring_buffer_expanded = true;
230 __setup("alloc_snapshot", boot_alloc_snapshot);
233 static int __init boot_snapshot(char *str)
235 snapshot_at_boot = true;
236 boot_alloc_snapshot(str);
239 __setup("ftrace_boot_snapshot", boot_snapshot);
242 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
244 static int __init set_trace_boot_options(char *str)
246 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
249 __setup("trace_options=", set_trace_boot_options);
251 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
252 static char *trace_boot_clock __initdata;
254 static int __init set_trace_boot_clock(char *str)
256 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
257 trace_boot_clock = trace_boot_clock_buf;
260 __setup("trace_clock=", set_trace_boot_clock);
262 static int __init set_tracepoint_printk(char *str)
264 /* Ignore the "tp_printk_stop_on_boot" param */
	if (strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)
269 tracepoint_printk = 1;
272 __setup("tp_printk", set_tracepoint_printk);
274 static int __init set_tracepoint_printk_stop(char *str)
276 tracepoint_printk_stop_on_boot = true;
279 __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
281 unsigned long long ns2usecs(u64 nsec)
289 trace_process_export(struct trace_export *export,
290 struct ring_buffer_event *event, int flag)
292 struct trace_entry *entry;
293 unsigned int size = 0;
295 if (export->flags & flag) {
296 entry = ring_buffer_event_data(event);
297 size = ring_buffer_event_length(event);
298 export->write(export, entry, size);
302 static DEFINE_MUTEX(ftrace_export_lock);
304 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
306 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
307 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
308 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
310 static inline void ftrace_exports_enable(struct trace_export *export)
312 if (export->flags & TRACE_EXPORT_FUNCTION)
313 static_branch_inc(&trace_function_exports_enabled);
315 if (export->flags & TRACE_EXPORT_EVENT)
316 static_branch_inc(&trace_event_exports_enabled);
318 if (export->flags & TRACE_EXPORT_MARKER)
319 static_branch_inc(&trace_marker_exports_enabled);
322 static inline void ftrace_exports_disable(struct trace_export *export)
324 if (export->flags & TRACE_EXPORT_FUNCTION)
325 static_branch_dec(&trace_function_exports_enabled);
327 if (export->flags & TRACE_EXPORT_EVENT)
328 static_branch_dec(&trace_event_exports_enabled);
330 if (export->flags & TRACE_EXPORT_MARKER)
331 static_branch_dec(&trace_marker_exports_enabled);
334 static void ftrace_exports(struct ring_buffer_event *event, int flag)
336 struct trace_export *export;
338 preempt_disable_notrace();
340 export = rcu_dereference_raw_check(ftrace_exports_list);
342 trace_process_export(export, event, flag);
343 export = rcu_dereference_raw_check(export->next);
346 preempt_enable_notrace();
350 add_trace_export(struct trace_export **list, struct trace_export *export)
352 rcu_assign_pointer(export->next, *list);
	/*
	 * We are inserting export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included in the list.
	 */
359 rcu_assign_pointer(*list, export);
363 rm_trace_export(struct trace_export **list, struct trace_export *export)
365 struct trace_export **p;
367 for (p = list; *p != NULL; p = &(*p)->next)
374 rcu_assign_pointer(*p, (*p)->next);
380 add_ftrace_export(struct trace_export **list, struct trace_export *export)
382 ftrace_exports_enable(export);
384 add_trace_export(list, export);
388 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
392 ret = rm_trace_export(list, export);
393 ftrace_exports_disable(export);
398 int register_ftrace_export(struct trace_export *export)
400 if (WARN_ON_ONCE(!export->write))
403 mutex_lock(&ftrace_export_lock);
405 add_ftrace_export(&ftrace_exports_list, export);
407 mutex_unlock(&ftrace_export_lock);
411 EXPORT_SYMBOL_GPL(register_ftrace_export);
413 int unregister_ftrace_export(struct trace_export *export)
417 mutex_lock(&ftrace_export_lock);
419 ret = rm_ftrace_export(&ftrace_exports_list, export);
421 mutex_unlock(&ftrace_export_lock);
425 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
427 /* trace_flags holds trace_options default values */
428 #define TRACE_DEFAULT_FLAGS \
429 (FUNCTION_DEFAULT_FLAGS | \
430 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
431 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
432 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
433 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \
436 /* trace_options that are only supported by global_trace */
437 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
438 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
440 /* trace_flags that are default zero for instances */
441 #define ZEROED_TRACE_FLAGS \
442 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
445 * The global_trace is the descriptor that holds the top-level tracing
446 * buffers for the live tracing.
448 static struct trace_array global_trace = {
449 .trace_flags = TRACE_DEFAULT_FLAGS,
452 LIST_HEAD(ftrace_trace_arrays);
454 int trace_array_get(struct trace_array *this_tr)
456 struct trace_array *tr;
459 mutex_lock(&trace_types_lock);
460 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
467 mutex_unlock(&trace_types_lock);
472 static void __trace_array_put(struct trace_array *this_tr)
474 WARN_ON(!this_tr->ref);
/**
 * trace_array_put - Decrement the reference counter for this trace array.
 * @this_tr : pointer to the trace array
 *
 * NOTE: Use this when we no longer need the trace array returned by
 * trace_array_get_by_name(). This ensures the trace array can be later
 * destroyed.
 */
void trace_array_put(struct trace_array *this_tr)
492 mutex_lock(&trace_types_lock);
493 __trace_array_put(this_tr);
494 mutex_unlock(&trace_types_lock);
496 EXPORT_SYMBOL_GPL(trace_array_put);
498 int tracing_check_open_get_tr(struct trace_array *tr)
502 ret = security_locked_down(LOCKDOWN_TRACEFS);
506 if (tracing_disabled)
509 if (tr && trace_array_get(tr) < 0)
515 int call_filter_check_discard(struct trace_event_call *call, void *rec,
516 struct trace_buffer *buffer,
517 struct ring_buffer_event *event)
519 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
520 !filter_match_preds(call->filter, rec)) {
521 __trace_event_discard_commit(buffer, event);
529 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
530 * @filtered_pids: The list of pids to check
531 * @search_pid: The PID to find in @filtered_pids
533 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
536 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
538 return trace_pid_list_is_set(filtered_pids, search_pid);
542 * trace_ignore_this_task - should a task be ignored for tracing
543 * @filtered_pids: The list of pids to check
544 * @filtered_no_pids: The list of pids not to be traced
545 * @task: The task that should be ignored if not filtered
547 * Checks if @task should be traced or not from @filtered_pids.
548 * Returns true if @task should *NOT* be traced.
549 * Returns false if @task should be traced.
552 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
553 struct trace_pid_list *filtered_no_pids,
554 struct task_struct *task)
557 * If filtered_no_pids is not empty, and the task's pid is listed
558 * in filtered_no_pids, then return true.
559 * Otherwise, if filtered_pids is empty, that means we can
560 * trace all tasks. If it has content, then only trace pids
561 * within filtered_pids.
564 return (filtered_pids &&
565 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
567 trace_find_filtered_pid(filtered_no_pids, task->pid));
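/*
 * Sketch of the decision above as a truth table (illustrative):
 *
 *   filtered_pids   filtered_no_pids   task->pid              -> ignored?
 *   NULL            NULL               -                         no
 *   set             -                  not in filtered_pids      yes
 *   set             -                  in filtered_pids          no
 *   -               set                in filtered_no_pids       yes
 */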
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, and if @self is defined, the task is only added if
 * @self is also included in @pid_list. This happens on fork and tasks
 * should only be added when the parent is listed. If @self is NULL, then
 * the @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
582 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
583 struct task_struct *self,
584 struct task_struct *task)
589 /* For forks, we only add if the forking task is listed */
591 if (!trace_find_filtered_pid(pid_list, self->pid))
595 /* "self" is set for forks, and NULL for exits */
597 trace_pid_list_set(pid_list, task->pid);
599 trace_pid_list_clear(pid_list, task->pid);
/**
 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
 * @pid_list: The pid list to show
 * @v: The last pid that was shown (+1 of the actual pid, so that zero can be displayed)
 * @pos: The position of the file
 *
 * This is used by the seq_file "next" operation to iterate the pids
 * listed in a trace_pid_list structure.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
614 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
616 long pid = (unsigned long)v;
621 /* pid already is +1 of the actual previous bit */
622 if (trace_pid_list_next(pid_list, pid, &next) < 0)
627 /* Return pid + 1 to allow zero to be represented */
628 return (void *)(pid + 1);
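/*
 * Illustrative helpers (not part of this file) showing the +1 encoding
 * used by the seq_file callbacks above: pid 0 must be distinguishable
 * from the NULL "stop iteration" return, so pids travel shifted by one
 * inside the iterator cookie.
 */
static inline void *pid_to_seq_cookie(unsigned long pid)
{
	return (void *)(pid + 1);	/* never NULL, even for pid 0 */
}

static inline unsigned long seq_cookie_to_pid(void *v)
{
	return (unsigned long)v - 1;
}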
/**
 * trace_pid_start - Used for seq_file to start reading pid lists
 * @pid_list: The pid list to show
 * @pos: The position of the file
 *
 * This is used by the seq_file "start" operation to start the
 * iteration of the pid list.
 *
 * Returns the pid+1 as we want to display pid of zero, but NULL would
 * stop the iteration.
 */
642 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
648 if (trace_pid_list_first(pid_list, &first) < 0)
653 /* Return pid + 1 so that zero can be the exit value */
654 for (pid++; pid && l < *pos;
655 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
670 unsigned long pid = (unsigned long)v - 1;
672 seq_printf(m, "%lu\n", pid);
/* 128 (PID_BUF_SIZE + 1, as allocated below) should be much more than enough */
#define PID_BUF_SIZE		127
679 int trace_pid_write(struct trace_pid_list *filtered_pids,
680 struct trace_pid_list **new_pid_list,
681 const char __user *ubuf, size_t cnt)
683 struct trace_pid_list *pid_list;
684 struct trace_parser parser;
692 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
	/*
	 * Always recreate a new array. The write is an all-or-nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
	pid_list = trace_pid_list_alloc();
703 trace_parser_put(&parser);
708 /* copy the current bits to the new max */
709 ret = trace_pid_list_first(filtered_pids, &pid);
711 trace_pid_list_set(pid_list, pid);
712 ret = trace_pid_list_next(filtered_pids, pid + 1, &pid);
722 ret = trace_get_user(&parser, ubuf, cnt, &pos);
730 if (!trace_parser_loaded(&parser))
734 if (kstrtoul(parser.buffer, 0, &val))
739 if (trace_pid_list_set(pid_list, pid) < 0) {
745 trace_parser_clear(&parser);
748 trace_parser_put(&parser);
751 trace_pid_list_free(pid_list);
756 /* Cleared the list of pids */
757 trace_pid_list_free(pid_list);
761 *new_pid_list = pid_list;
766 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
770 /* Early boot up does not have a buffer yet */
772 return trace_clock_local();
774 ts = ring_buffer_time_stamp(buf->buffer);
775 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
780 u64 ftrace_now(int cpu)
782 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" so it can be checked in fast paths
 * such as the irqsoff tracer. But it may be inaccurate due to races.
 * If you need to know the accurate state, use tracing_is_on(), which
 * is a little slower, but accurate.
 */
int tracing_is_enabled(void)
797 * For quick access (irqsoff uses this in fast path), just
798 * return the mirror variable of the state of the ring buffer.
799 * It's a little racy, but we don't really care.
802 return !global_trace.buffer_disabled;
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * up to the nearest page.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * configured at boot time and at run time.
 */
815 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
817 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
819 /* trace_types holds a link list of available tracers. */
820 static struct tracer *trace_types __read_mostly;
823 * trace_types_lock is used to protect the trace_types list.
825 DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (which are returned by
 * ring_buffer_peek(), etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers simultaneously.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
850 static DECLARE_RWSEM(all_cpu_access_lock);
851 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
853 static inline void trace_access_lock(int cpu)
855 if (cpu == RING_BUFFER_ALL_CPUS) {
856 /* gain it for accessing the whole ring buffer. */
857 down_write(&all_cpu_access_lock);
859 /* gain it for accessing a cpu ring buffer. */
		/* First, block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);
		/* Second, block other access to this @cpu ring buffer. */
865 mutex_lock(&per_cpu(cpu_access_lock, cpu));
869 static inline void trace_access_unlock(int cpu)
871 if (cpu == RING_BUFFER_ALL_CPUS) {
872 up_write(&all_cpu_access_lock);
874 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
875 up_read(&all_cpu_access_lock);
879 static inline void trace_access_lock_init(void)
883 for_each_possible_cpu(cpu)
884 mutex_init(&per_cpu(cpu_access_lock, cpu));
889 static DEFINE_MUTEX(access_lock);
891 static inline void trace_access_lock(int cpu)
894 mutex_lock(&access_lock);
897 static inline void trace_access_unlock(int cpu)
900 mutex_unlock(&access_lock);
static inline void trace_access_lock_init(void)
{
}
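/*
 * Illustrative usage of the locking scheme above (a sketch, not a
 * function in this file): a reader brackets its accesses with
 * trace_access_lock()/trace_access_unlock() for one cpu, or passes
 * RING_BUFFER_ALL_CPUS to lock out all per-cpu readers at once.
 */
static inline void example_consume_cpu_events(int cpu)
{
	trace_access_lock(cpu);
	/* ... read or consume events of @cpu's ring buffer here ... */
	trace_access_unlock(cpu);
}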
909 #ifdef CONFIG_STACKTRACE
910 static void __ftrace_trace_stack(struct trace_buffer *buffer,
911 unsigned int trace_ctx,
912 int skip, struct pt_regs *regs);
913 static inline void ftrace_trace_stack(struct trace_array *tr,
914 struct trace_buffer *buffer,
915 unsigned int trace_ctx,
916 int skip, struct pt_regs *regs);
919 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
920 unsigned int trace_ctx,
921 int skip, struct pt_regs *regs)
924 static inline void ftrace_trace_stack(struct trace_array *tr,
925 struct trace_buffer *buffer,
926 unsigned long trace_ctx,
927 int skip, struct pt_regs *regs)
933 static __always_inline void
934 trace_event_setup(struct ring_buffer_event *event,
935 int type, unsigned int trace_ctx)
937 struct trace_entry *ent = ring_buffer_event_data(event);
939 tracing_generic_entry_update(ent, type, trace_ctx);
942 static __always_inline struct ring_buffer_event *
943 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
946 unsigned int trace_ctx)
948 struct ring_buffer_event *event;
950 event = ring_buffer_lock_reserve(buffer, len);
952 trace_event_setup(event, type, trace_ctx);
957 void tracer_tracing_on(struct trace_array *tr)
959 if (tr->array_buffer.buffer)
960 ring_buffer_record_on(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
970 /* Make the flag seen by readers */
975 * tracing_on - enable tracing buffers
977 * This function enables tracing buffers that may have been
978 * disabled with tracing_off.
980 void tracing_on(void)
982 tracer_tracing_on(&global_trace);
984 EXPORT_SYMBOL_GPL(tracing_on);
987 static __always_inline void
988 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
990 __this_cpu_write(trace_taskinfo_save, true);
992 /* If this is the temp buffer, we need to commit fully */
993 if (this_cpu_read(trace_buffered_event) == event) {
994 /* Length is in event->array[0] */
995 ring_buffer_write(buffer, event->array[0], &event->array[1]);
996 /* Release the temp buffer */
997 this_cpu_dec(trace_buffered_event_cnt);
998 /* ring_buffer_unlock_commit() enables preemption */
999 preempt_enable_notrace();
1001 ring_buffer_unlock_commit(buffer);
1005 * __trace_puts - write a constant string into the trace buffer.
1006 * @ip: The address of the caller
1007 * @str: The constant string to write
1008 * @size: The size of the string.
1010 int __trace_puts(unsigned long ip, const char *str, int size)
1012 struct ring_buffer_event *event;
1013 struct trace_buffer *buffer;
1014 struct print_entry *entry;
1015 unsigned int trace_ctx;
1018 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1021 if (unlikely(tracing_selftest_running || tracing_disabled))
1024 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1026 trace_ctx = tracing_gen_ctx();
1027 buffer = global_trace.array_buffer.buffer;
1028 ring_buffer_nest_start(buffer);
1029 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1036 entry = ring_buffer_event_data(event);
1039 memcpy(&entry->buf, str, size);
1041 /* Add a newline if necessary */
1042 if (entry->buf[size - 1] != '\n') {
1043 entry->buf[size] = '\n';
1044 entry->buf[size + 1] = '\0';
1046 entry->buf[size] = '\0';
1048 __buffer_unlock_commit(buffer, event);
1049 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1051 ring_buffer_nest_end(buffer);
1054 EXPORT_SYMBOL_GPL(__trace_puts);
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
1061 int __trace_bputs(unsigned long ip, const char *str)
1063 struct ring_buffer_event *event;
1064 struct trace_buffer *buffer;
1065 struct bputs_entry *entry;
1066 unsigned int trace_ctx;
1067 int size = sizeof(struct bputs_entry);
1070 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1073 if (unlikely(tracing_selftest_running || tracing_disabled))
1076 trace_ctx = tracing_gen_ctx();
1077 buffer = global_trace.array_buffer.buffer;
1079 ring_buffer_nest_start(buffer);
1080 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1085 entry = ring_buffer_event_data(event);
1089 __buffer_unlock_commit(buffer, event);
1090 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 4, NULL);
1094 ring_buffer_nest_end(buffer);
1097 EXPORT_SYMBOL_GPL(__trace_bputs);
1099 #ifdef CONFIG_TRACER_SNAPSHOT
1100 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1103 struct tracer *tracer = tr->current_trace;
1104 unsigned long flags;
1107 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1108 internal_trace_puts("*** snapshot is being ignored ***\n");
1112 if (!tr->allocated_snapshot) {
1113 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1114 internal_trace_puts("*** stopping trace here! ***\n");
	/* Note, the snapshot cannot be used when the current tracer itself uses it */
1120 if (tracer->use_max_tr) {
1121 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1122 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1126 local_irq_save(flags);
1127 update_max_tr(tr, current, smp_processor_id(), cond_data);
1128 local_irq_restore(flags);
1131 void tracing_snapshot_instance(struct trace_array *tr)
1133 tracing_snapshot_instance_cond(tr, NULL);
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
1150 void tracing_snapshot(void)
1152 struct trace_array *tr = &global_trace;
1154 tracing_snapshot_instance(tr);
1156 EXPORT_SYMBOL_GPL(tracing_snapshot);
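/*
 * Usage sketch (illustrative, not part of this file): allocate the
 * snapshot buffer once from a sleepable context, then trigger
 * snapshots later from contexts that cannot sleep.
 */
static void example_snapshot_user(void)
{
	if (tracing_alloc_snapshot() < 0)	/* may sleep */
		return;
	/* ... later, e.g. when a suspicious condition is hit ... */
	tracing_snapshot();			/* swaps live and snapshot buffers */
}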
1159 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1160 * @tr: The tracing instance to snapshot
1161 * @cond_data: The data to be tested conditionally, and possibly saved
1163 * This is the same as tracing_snapshot() except that the snapshot is
1164 * conditional - the snapshot will only happen if the
1165 * cond_snapshot.update() implementation receiving the cond_data
1166 * returns true, which means that the trace array's cond_snapshot
1167 * update() operation used the cond_data to determine whether the
1168 * snapshot should be taken, and if it was, presumably saved it along
1169 * with the snapshot.
1171 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1173 tracing_snapshot_instance_cond(tr, cond_data);
1175 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1178 * tracing_cond_snapshot_data - get the user data associated with a snapshot
1179 * @tr: The tracing instance
1181 * When the user enables a conditional snapshot using
1182 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1183 * with the snapshot. This accessor is used to retrieve it.
1185 * Should not be called from cond_snapshot.update(), since it takes
1186 * the tr->max_lock lock, which the code calling
1187 * cond_snapshot.update() has already done.
1189 * Returns the cond_data associated with the trace array's snapshot.
1191 void *tracing_cond_snapshot_data(struct trace_array *tr)
1193 void *cond_data = NULL;
1195 local_irq_disable();
1196 arch_spin_lock(&tr->max_lock);
1198 if (tr->cond_snapshot)
1199 cond_data = tr->cond_snapshot->cond_data;
1201 arch_spin_unlock(&tr->max_lock);
1206 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1208 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1209 struct array_buffer *size_buf, int cpu_id);
1210 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1212 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1216 if (!tr->allocated_snapshot) {
1218 /* allocate spare buffer */
1219 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1220 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1224 tr->allocated_snapshot = true;
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
1237 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1238 set_buffer_entries(&tr->max_buffer, 1);
1239 tracing_reset_online_cpus(&tr->max_buffer);
1240 tr->allocated_snapshot = false;
1244 * tracing_alloc_snapshot - allocate snapshot buffer.
1246 * This only allocates the snapshot buffer if it isn't already
1247 * allocated - it doesn't also take a snapshot.
1249 * This is meant to be used in cases where the snapshot buffer needs
1250 * to be set up for events that can't sleep but need to be able to
1251 * trigger a snapshot.
1253 int tracing_alloc_snapshot(void)
1255 struct trace_array *tr = &global_trace;
1258 ret = tracing_alloc_snapshot_instance(tr);
1263 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1266 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1268 * This is similar to tracing_snapshot(), but it will allocate the
1269 * snapshot buffer if it isn't already allocated. Use this only
1270 * where it is safe to sleep, as the allocation may sleep.
1272 * This causes a swap between the snapshot buffer and the current live
1273 * tracing buffer. You can use this to take snapshots of the live
1274 * trace when some condition is triggered, but continue to trace.
1276 void tracing_snapshot_alloc(void)
1280 ret = tracing_alloc_snapshot();
1286 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1289 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1290 * @tr: The tracing instance
1291 * @cond_data: User data to associate with the snapshot
1292 * @update: Implementation of the cond_snapshot update function
1294 * Check whether the conditional snapshot for the given instance has
1295 * already been enabled, or if the current tracer is already using a
1296 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1297 * save the cond_data and update function inside.
1299 * Returns 0 if successful, error otherwise.
1301 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1302 cond_update_fn_t update)
1304 struct cond_snapshot *cond_snapshot;
1307 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1311 cond_snapshot->cond_data = cond_data;
1312 cond_snapshot->update = update;
1314 mutex_lock(&trace_types_lock);
1316 ret = tracing_alloc_snapshot_instance(tr);
1320 if (tr->current_trace->use_max_tr) {
1326 * The cond_snapshot can only change to NULL without the
1327 * trace_types_lock. We don't care if we race with it going
1328 * to NULL, but we want to make sure that it's not set to
1329 * something other than NULL when we get here, which we can
1330 * do safely with only holding the trace_types_lock and not
1331 * having to take the max_lock.
1333 if (tr->cond_snapshot) {
1338 local_irq_disable();
1339 arch_spin_lock(&tr->max_lock);
1340 tr->cond_snapshot = cond_snapshot;
1341 arch_spin_unlock(&tr->max_lock);
1344 mutex_unlock(&trace_types_lock);
1349 mutex_unlock(&trace_types_lock);
1350 kfree(cond_snapshot);
1353 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
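/*
 * Sketch of a conditional-snapshot user (illustrative, not part of this
 * file): the update() callback runs under tr->max_lock and decides
 * whether the snapshot is actually taken. "example_threshold" and
 * "my_data" are hypothetical names.
 */
static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	int example_threshold = 100;

	return *(int *)cond_data > example_threshold;
}
/* enable with: tracing_snapshot_cond_enable(tr, &my_data, example_cond_update); */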
1356 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1357 * @tr: The tracing instance
1359 * Check whether the conditional snapshot for the given instance is
1360 * enabled; if so, free the cond_snapshot associated with it,
1361 * otherwise return -EINVAL.
1363 * Returns 0 if successful, error otherwise.
1365 int tracing_snapshot_cond_disable(struct trace_array *tr)
1369 local_irq_disable();
1370 arch_spin_lock(&tr->max_lock);
1372 if (!tr->cond_snapshot)
1375 kfree(tr->cond_snapshot);
1376 tr->cond_snapshot = NULL;
1379 arch_spin_unlock(&tr->max_lock);
1384 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1386 void tracing_snapshot(void)
1388 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1390 EXPORT_SYMBOL_GPL(tracing_snapshot);
1391 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1393 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1395 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1396 int tracing_alloc_snapshot(void)
1398 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1401 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1402 void tracing_snapshot_alloc(void)
1407 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1408 void *tracing_cond_snapshot_data(struct trace_array *tr)
1412 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1413 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1417 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1418 int tracing_snapshot_cond_disable(struct trace_array *tr)
1422 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1423 #define free_snapshot(tr) do { } while (0)
1424 #endif /* CONFIG_TRACER_SNAPSHOT */
1426 void tracer_tracing_off(struct trace_array *tr)
1428 if (tr->array_buffer.buffer)
1429 ring_buffer_record_off(tr->array_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff) that just want to
	 * know if the ring buffer has been disabled, but can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
1439 /* Make the flag seen by readers */
1444 * tracing_off - turn off tracing buffers
1446 * This function stops the tracing buffers from recording data.
1447 * It does not disable any overhead the tracers themselves may
1448 * be causing. This function simply causes all recording to
1449 * the ring buffers to fail.
1451 void tracing_off(void)
1453 tracer_tracing_off(&global_trace);
1455 EXPORT_SYMBOL_GPL(tracing_off);
1457 void disable_trace_on_warning(void)
1459 if (__disable_trace_on_warning) {
1460 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1461 "Disabling tracing due to warning\n");
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
1472 bool tracer_tracing_is_on(struct trace_array *tr)
1474 if (tr->array_buffer.buffer)
1475 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1476 return !tr->buffer_disabled;
1480 * tracing_is_on - show state of ring buffers enabled
1482 int tracing_is_on(void)
1484 return tracer_tracing_is_on(&global_trace);
1486 EXPORT_SYMBOL_GPL(tracing_is_on);
1488 static int __init set_buf_size(char *str)
1490 unsigned long buf_size;
1494 buf_size = memparse(str, &str);
1496 * nr_entries can not be zero and the startup
1497 * tests require some buffer space. Therefore
1498 * ensure we have at least 4096 bytes of buffer.
1500 trace_buf_size = max(4096UL, buf_size);
1503 __setup("trace_buf_size=", set_buf_size);
1505 static int __init set_tracing_thresh(char *str)
1507 unsigned long threshold;
1512 ret = kstrtoul(str, 0, &threshold);
1515 tracing_thresh = threshold * 1000;
1518 __setup("tracing_thresh=", set_tracing_thresh);
1520 unsigned long nsecs_to_usecs(unsigned long nsecs)
1522 return nsecs / 1000;
1526 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1527 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1528 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1529 * of strings in the order that the evals (enum) were defined.
1534 /* These must match the bit positions in trace_iterator_flags */
1535 static const char *trace_options[] = {
1543 int in_ns; /* is this clock in nanoseconds? */
1544 } trace_clocks[] = {
1545 { trace_clock_local, "local", 1 },
1546 { trace_clock_global, "global", 1 },
1547 { trace_clock_counter, "counter", 0 },
1548 { trace_clock_jiffies, "uptime", 0 },
1549 { trace_clock, "perf", 1 },
1550 { ktime_get_mono_fast_ns, "mono", 1 },
1551 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1552 { ktime_get_boot_fast_ns, "boot", 1 },
1553 { ktime_get_tai_fast_ns, "tai", 1 },
1557 bool trace_clock_in_ns(struct trace_array *tr)
1559 if (trace_clocks[tr->clock_id].in_ns)
1566 * trace_parser_get_init - gets the buffer for trace parser
1568 int trace_parser_get_init(struct trace_parser *parser, int size)
1570 memset(parser, 0, sizeof(*parser));
1572 parser->buffer = kmalloc(size, GFP_KERNEL);
1573 if (!parser->buffer)
1576 parser->size = size;
1581 * trace_parser_put - frees the buffer for trace parser
1583 void trace_parser_put(struct trace_parser *parser)
1585 kfree(parser->buffer);
1586 parser->buffer = NULL;
1590 * trace_get_user - reads the user input string separated by space
1591 * (matched by isspace(ch))
1593 * For each string found the 'struct trace_parser' is updated,
1594 * and the function returns.
1596 * Returns number of bytes read.
1598 * See kernel/trace/trace.h for 'struct trace_parser' details.
1600 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1601 size_t cnt, loff_t *ppos)
1608 trace_parser_clear(parser);
1610 ret = get_user(ch, ubuf++);
1618 * The parser is not finished with the last write,
1619 * continue reading the user input without skipping spaces.
1621 if (!parser->cont) {
1622 /* skip white space */
1623 while (cnt && isspace(ch)) {
1624 ret = get_user(ch, ubuf++);
1633 /* only spaces were written */
1634 if (isspace(ch) || !ch) {
1641 /* read the non-space input */
1642 while (cnt && !isspace(ch) && ch) {
1643 if (parser->idx < parser->size - 1)
1644 parser->buffer[parser->idx++] = ch;
1649 ret = get_user(ch, ubuf++);
1656 /* We either got finished input or we have to wait for another call. */
1657 if (isspace(ch) || !ch) {
1658 parser->buffer[parser->idx] = 0;
1659 parser->cont = false;
1660 } else if (parser->idx < parser->size - 1) {
1661 parser->cont = true;
1662 parser->buffer[parser->idx++] = ch;
1663 /* Make sure the parsed string always terminates with '\0'. */
1664 parser->buffer[parser->idx] = 0;
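/*
 * Illustrative caller (a sketch, not from this file): a write() handler
 * feeding user input through trace_get_user() one whitespace-separated
 * token at a time. The buffer size of 64 is arbitrary.
 */
static ssize_t example_write(const char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 64))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated token */
	}

	trace_parser_put(&parser);
	return read;
}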
1677 /* TODO add a seq_buf_to_buffer() */
1678 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1682 if (trace_seq_used(s) <= s->seq.readpos)
1685 len = trace_seq_used(s) - s->seq.readpos;
1688 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1690 s->seq.readpos += cnt;
1694 unsigned long __read_mostly tracing_thresh;
1696 #ifdef CONFIG_TRACER_MAX_TRACE
1697 static const struct file_operations tracing_max_lat_fops;
1699 #ifdef LATENCY_FS_NOTIFY
1701 static struct workqueue_struct *fsnotify_wq;
1703 static void latency_fsnotify_workfn(struct work_struct *work)
1705 struct trace_array *tr = container_of(work, struct trace_array,
1707 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1710 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1712 struct trace_array *tr = container_of(iwork, struct trace_array,
1714 queue_work(fsnotify_wq, &tr->fsnotify_work);
1717 static void trace_create_maxlat_file(struct trace_array *tr,
1718 struct dentry *d_tracer)
1720 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1721 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1722 tr->d_max_latency = trace_create_file("tracing_max_latency",
1724 d_tracer, &tr->max_latency,
1725 &tracing_max_lat_fops);
1728 __init static int latency_fsnotify_init(void)
1730 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1731 WQ_UNBOUND | WQ_HIGHPRI, 0);
1733 pr_err("Unable to allocate tr_max_lat_wq\n");
1739 late_initcall_sync(latency_fsnotify_init);
1741 void latency_fsnotify(struct trace_array *tr)
1746 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1747 * possible that we are called from __schedule() or do_idle(), which
1748 * could cause a deadlock.
1750 irq_work_queue(&tr->fsnotify_irqwork);
1753 #else /* !LATENCY_FS_NOTIFY */
1755 #define trace_create_maxlat_file(tr, d_tracer) \
1756 trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \
1757 d_tracer, &tr->max_latency, &tracing_max_lat_fops)
1762 * Copy the new maximum trace into the separate maximum-trace
1763 * structure. (this way the maximum trace is permanently saved,
1764 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1767 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1769 struct array_buffer *trace_buf = &tr->array_buffer;
1770 struct array_buffer *max_buf = &tr->max_buffer;
1771 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1772 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1775 max_buf->time_start = data->preempt_timestamp;
1777 max_data->saved_latency = tr->max_latency;
1778 max_data->critical_start = data->critical_start;
1779 max_data->critical_end = data->critical_end;
1781 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1782 max_data->pid = tsk->pid;
1784 * If tsk == current, then use current_uid(), as that does not use
1785 * RCU. The irq tracer can be called out of RCU scope.
1788 max_data->uid = current_uid();
1790 max_data->uid = task_uid(tsk);
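	/*
	 * static_prio is MAX_RT_PRIO..MAX_RT_PRIO+39 for normal tasks,
	 * so the expression below recovers the nice value in -20..19.
	 */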
1792 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1793 max_data->policy = tsk->policy;
1794 max_data->rt_priority = tsk->rt_priority;
1796 /* record this tasks comm */
1797 tracing_record_cmdline(tsk);
1798 latency_fsnotify(tr);
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 * @cond_data: User data associated with a conditional snapshot
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
	      void *cond_data)
{
1818 WARN_ON_ONCE(!irqs_disabled());
1820 if (!tr->allocated_snapshot) {
1821 /* Only the nop tracer should hit this when disabling */
1822 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1826 arch_spin_lock(&tr->max_lock);
1828 /* Inherit the recordable setting from array_buffer */
1829 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1830 ring_buffer_record_on(tr->max_buffer.buffer);
1832 ring_buffer_record_off(tr->max_buffer.buffer);
1834 #ifdef CONFIG_TRACER_SNAPSHOT
1835 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) {
1836 arch_spin_unlock(&tr->max_lock);
1840 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1842 __update_max_tr(tr, tsk, cpu);
1844 arch_spin_unlock(&tr->max_lock);
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1863 WARN_ON_ONCE(!irqs_disabled());
1864 if (!tr->allocated_snapshot) {
1865 /* Only the nop tracer should hit this when disabling */
1866 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1870 arch_spin_lock(&tr->max_lock);
1872 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1874 if (ret == -EBUSY) {
1876 * We failed to swap the buffer due to a commit taking
1877 * place on this CPU. We fail to record, but we reset
1878 * the max trace buffer (no one writes directly to it)
1879 * and flag that it failed.
1881 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1882 "Failed to swap buffers due to commit in progress\n");
1885 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1887 __update_max_tr(tr, tsk, cpu);
1888 arch_spin_unlock(&tr->max_lock);
1891 #endif /* CONFIG_TRACER_MAX_TRACE */
1893 static int wait_on_pipe(struct trace_iterator *iter, int full)
1895 /* Iterators are static, they should be filled or empty */
1896 if (trace_buffer_iter(iter, iter->cpu_file))
1899 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1903 #ifdef CONFIG_FTRACE_STARTUP_TEST
1904 static bool selftests_can_run;
1906 struct trace_selftests {
1907 struct list_head list;
1908 struct tracer *type;
1911 static LIST_HEAD(postponed_selftests);
1913 static int save_selftest(struct tracer *type)
1915 struct trace_selftests *selftest;
1917 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1921 selftest->type = type;
1922 list_add(&selftest->list, &postponed_selftests);
1926 static int run_tracer_selftest(struct tracer *type)
1928 struct trace_array *tr = &global_trace;
1929 struct tracer *saved_tracer = tr->current_trace;
1932 if (!type->selftest || tracing_selftest_disabled)
1936 * If a tracer registers early in boot up (before scheduling is
1937 * initialized and such), then do not run its selftests yet.
1938 * Instead, run it a little later in the boot process.
1940 if (!selftests_can_run)
1941 return save_selftest(type);
1943 if (!tracing_is_on()) {
1944 pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
1950 * Run a selftest on this tracer.
1951 * Here we reset the trace buffer, and set the current
1952 * tracer to be this tracer. The tracer can then run some
1953 * internal tracing to verify that everything is in order.
1954 * If we fail, we do not register this tracer.
1956 tracing_reset_online_cpus(&tr->array_buffer);
1958 tr->current_trace = type;
1960 #ifdef CONFIG_TRACER_MAX_TRACE
1961 if (type->use_max_tr) {
1962 /* If we expanded the buffers, make sure the max is expanded too */
1963 if (ring_buffer_expanded)
1964 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1965 RING_BUFFER_ALL_CPUS);
1966 tr->allocated_snapshot = true;
1970 /* the test is responsible for initializing and enabling */
1971 pr_info("Testing tracer %s: ", type->name);
1972 ret = type->selftest(type, tr);
1973 /* the test is responsible for resetting too */
1974 tr->current_trace = saved_tracer;
1976 printk(KERN_CONT "FAILED!\n");
1977 /* Add the warning after printing 'FAILED' */
1981 /* Only reset on passing, to avoid touching corrupted buffers */
1982 tracing_reset_online_cpus(&tr->array_buffer);
1984 #ifdef CONFIG_TRACER_MAX_TRACE
1985 if (type->use_max_tr) {
1986 tr->allocated_snapshot = false;
1988 /* Shrink the max buffer again */
1989 if (ring_buffer_expanded)
1990 ring_buffer_resize(tr->max_buffer.buffer, 1,
1991 RING_BUFFER_ALL_CPUS);
1995 printk(KERN_CONT "PASSED\n");
1999 static __init int init_trace_selftests(void)
2001 struct trace_selftests *p, *n;
2002 struct tracer *t, **last;
2005 selftests_can_run = true;
2007 mutex_lock(&trace_types_lock);
2009 if (list_empty(&postponed_selftests))
2012 pr_info("Running postponed tracer tests:\n");
2014 tracing_selftest_running = true;
2015 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
		/* This loop can take minutes when sanitizers are enabled, so
		 * let's make sure we allow RCU processing.
		 */
2020 ret = run_tracer_selftest(p->type);
2021 /* If the test fails, then warn and remove from available_tracers */
2023 WARN(1, "tracer: %s failed selftest, disabling\n",
2025 last = &trace_types;
2026 for (t = trace_types; t; t = t->next) {
2037 tracing_selftest_running = false;
2040 mutex_unlock(&trace_types_lock);
2044 core_initcall(init_trace_selftests);
2046 static inline int run_tracer_selftest(struct tracer *type)
2050 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2052 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2054 static void __init apply_trace_boot_options(void);
2057 * register_tracer - register a tracer with the ftrace system.
2058 * @type: the plugin for the tracer
2060 * Register a new plugin tracer.
2062 int __init register_tracer(struct tracer *type)
2068 pr_info("Tracer must have a name\n");
2072 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2073 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2077 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2078 pr_warn("Can not register tracer %s due to lockdown\n",
2083 mutex_lock(&trace_types_lock);
2085 tracing_selftest_running = true;
2087 for (t = trace_types; t; t = t->next) {
2088 if (strcmp(type->name, t->name) == 0) {
2090 pr_info("Tracer %s already registered\n",
2097 if (!type->set_flag)
2098 type->set_flag = &dummy_set_flag;
	/* allocate a dummy tracer_flags */
2101 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2106 type->flags->val = 0;
2107 type->flags->opts = dummy_tracer_opt;
2109 if (!type->flags->opts)
2110 type->flags->opts = dummy_tracer_opt;
2112 /* store the tracer for __set_tracer_option */
2113 type->flags->trace = type;
2115 ret = run_tracer_selftest(type);
2119 type->next = trace_types;
2121 add_tracer_options(&global_trace, type);
2124 tracing_selftest_running = false;
2125 mutex_unlock(&trace_types_lock);
2127 if (ret || !default_bootup_tracer)
2130 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2133 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2134 /* Do we want this tracer to start on bootup? */
2135 tracing_set_tracer(&global_trace, type->name);
2136 default_bootup_tracer = NULL;
2138 apply_trace_boot_options();
	/* Disable other selftests, since this tracer will break them. */
	disable_tracing_selftest("running a tracer");
2147 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2149 struct trace_buffer *buffer = buf->buffer;
2154 ring_buffer_record_disable(buffer);
2156 /* Make sure all commits have finished */
2158 ring_buffer_reset_cpu(buffer, cpu);
2160 ring_buffer_record_enable(buffer);
2163 void tracing_reset_online_cpus(struct array_buffer *buf)
2165 struct trace_buffer *buffer = buf->buffer;
2170 ring_buffer_record_disable(buffer);
2172 /* Make sure all commits have finished */
2175 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2177 ring_buffer_reset_online_cpus(buffer);
2179 ring_buffer_record_enable(buffer);
2182 /* Must have trace_types_lock held */
2183 void tracing_reset_all_online_cpus_unlocked(void)
2185 struct trace_array *tr;
2187 lockdep_assert_held(&trace_types_lock);
2189 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2190 if (!tr->clear_trace)
2192 tr->clear_trace = false;
2193 tracing_reset_online_cpus(&tr->array_buffer);
2194 #ifdef CONFIG_TRACER_MAX_TRACE
2195 tracing_reset_online_cpus(&tr->max_buffer);
2200 void tracing_reset_all_online_cpus(void)
2202 mutex_lock(&trace_types_lock);
2203 tracing_reset_all_online_cpus_unlocked();
2204 mutex_unlock(&trace_types_lock);
2208 * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
2209 * is the tgid last observed corresponding to pid=i.
2211 static int *tgid_map;
2213 /* The maximum valid index into tgid_map. */
2214 static size_t tgid_map_max;
2216 #define SAVED_CMDLINES_DEFAULT 128
2217 #define NO_CMDLINE_MAP UINT_MAX
2219 * Preemption must be disabled before acquiring trace_cmdline_lock.
2220 * The various trace_arrays' max_lock must be acquired in a context
2221 * where interrupt is disabled.
2223 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2224 struct saved_cmdlines_buffer {
2225 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2226 unsigned *map_cmdline_to_pid;
2227 unsigned cmdline_num;
2229 char *saved_cmdlines;
2231 static struct saved_cmdlines_buffer *savedcmd;
2233 static inline char *get_saved_cmdlines(int idx)
2235 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2238 static inline void set_cmdline(int idx, const char *cmdline)
2240 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2243 static int allocate_cmdlines_buffer(unsigned int val,
2244 struct saved_cmdlines_buffer *s)
2246 s->map_cmdline_to_pid = kmalloc_array(val,
2247 sizeof(*s->map_cmdline_to_pid),
2249 if (!s->map_cmdline_to_pid)
2252 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2253 if (!s->saved_cmdlines) {
2254 kfree(s->map_cmdline_to_pid);
2259 s->cmdline_num = val;
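	/*
	 * NO_CMDLINE_MAP is UINT_MAX, i.e. every byte is 0xff, which is
	 * what makes the byte-wise memset()s below a valid initialization
	 * of both maps.
	 */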
2260 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2261 sizeof(s->map_pid_to_cmdline));
2262 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2263 val * sizeof(*s->map_cmdline_to_pid));
2268 static int trace_create_savedcmd(void)
2272 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2276 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2286 int is_tracing_stopped(void)
2288 return global_trace.stop_count;
2292 * tracing_start - quick start of the tracer
2294 * If tracing is enabled but was stopped by tracing_stop,
2295 * this will start the tracer back up.
2297 void tracing_start(void)
2299 struct trace_buffer *buffer;
2300 unsigned long flags;
2302 if (tracing_disabled)
2305 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2306 if (--global_trace.stop_count) {
2307 if (global_trace.stop_count < 0) {
2308 /* Someone screwed up their debugging */
2310 global_trace.stop_count = 0;
2315 /* Prevent the buffers from switching */
2316 arch_spin_lock(&global_trace.max_lock);
2318 buffer = global_trace.array_buffer.buffer;
2320 ring_buffer_record_enable(buffer);
2322 #ifdef CONFIG_TRACER_MAX_TRACE
2323 buffer = global_trace.max_buffer.buffer;
2325 ring_buffer_record_enable(buffer);
2328 arch_spin_unlock(&global_trace.max_lock);
2331 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2334 static void tracing_start_tr(struct trace_array *tr)
2336 struct trace_buffer *buffer;
2337 unsigned long flags;
2339 if (tracing_disabled)
2342 /* If global, we need to also start the max tracer */
2343 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2344 return tracing_start();
2346 raw_spin_lock_irqsave(&tr->start_lock, flags);
2348 if (--tr->stop_count) {
2349 if (tr->stop_count < 0) {
2350 /* Someone screwed up their debugging */
2357 buffer = tr->array_buffer.buffer;
2359 ring_buffer_record_enable(buffer);
2362 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
2373 struct trace_buffer *buffer;
2374 unsigned long flags;
2376 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2377 if (global_trace.stop_count++)
2380 /* Prevent the buffers from switching */
2381 arch_spin_lock(&global_trace.max_lock);
2383 buffer = global_trace.array_buffer.buffer;
2385 ring_buffer_record_disable(buffer);
2387 #ifdef CONFIG_TRACER_MAX_TRACE
2388 buffer = global_trace.max_buffer.buffer;
2390 ring_buffer_record_disable(buffer);
2393 arch_spin_unlock(&global_trace.max_lock);
2396 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2399 static void tracing_stop_tr(struct trace_array *tr)
2401 struct trace_buffer *buffer;
2402 unsigned long flags;
2404 /* If global, we need to also stop the max tracer */
2405 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2406 return tracing_stop();
2408 raw_spin_lock_irqsave(&tr->start_lock, flags);
2409 if (tr->stop_count++)
2410 goto out;
2412 buffer = tr->array_buffer.buffer;
2413 if (buffer)
2414 ring_buffer_record_disable(buffer);
2416 out:
2417 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2420 static int trace_save_cmdline(struct task_struct *tsk)
2424 /* treat recording of idle task as a success */
2425 if (!tsk->pid)
2426 return 1;
2428 tpid = tsk->pid & (PID_MAX_DEFAULT - 1);
2431 * It's not the end of the world if we don't get
2432 * the lock, but we also don't want to spin
2433 * nor do we want to disable interrupts,
2434 * so if we miss here, then better luck next time.
2436 * This is called within the scheduler and wake up, so interrupts
2437 * had better been disabled and run queue lock been held.
2439 lockdep_assert_preemption_disabled();
2440 if (!arch_spin_trylock(&trace_cmdline_lock))
2441 return 0;
2443 idx = savedcmd->map_pid_to_cmdline[tpid];
2444 if (idx == NO_CMDLINE_MAP) {
2445 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2447 savedcmd->map_pid_to_cmdline[tpid] = idx;
2448 savedcmd->cmdline_idx = idx;
2451 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2452 set_cmdline(idx, tsk->comm);
2454 arch_spin_unlock(&trace_cmdline_lock);
2456 return 1;
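/*
 * Illustrative walk-through (not from the original source): with
 * PID_MAX_DEFAULT of 0x8000, pid 100 and pid 32868 both hash to
 * tpid 100 above, so two tasks can contend for the same slot. The
 * reverse map_cmdline_to_pid[] entry records which pid owns the
 * slot, letting readers detect a stale entry and report "<...>"
 * rather than the wrong comm.
 */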
2459 static void __trace_find_cmdline(int pid, char comm[])
2461 unsigned map;
2462 int tpid;
2464 if (!pid) {
2465 strcpy(comm, "<idle>");
2466 return;
2469 if (WARN_ON_ONCE(pid < 0)) {
2470 strcpy(comm, "<XXX>");
2471 return;
2474 tpid = pid & (PID_MAX_DEFAULT - 1);
2475 map = savedcmd->map_pid_to_cmdline[tpid];
2476 if (map != NO_CMDLINE_MAP) {
2477 tpid = savedcmd->map_cmdline_to_pid[map];
2478 if (tpid == pid) {
2479 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2480 return;
2483 strcpy(comm, "<...>");
2486 void trace_find_cmdline(int pid, char comm[])
2488 preempt_disable();
2489 arch_spin_lock(&trace_cmdline_lock);
2491 __trace_find_cmdline(pid, comm);
2493 arch_spin_unlock(&trace_cmdline_lock);
2494 preempt_enable();
2497 static int *trace_find_tgid_ptr(int pid)
2500 * Pairs with the smp_store_release in set_tracer_flag() to ensure that
2501 * if we observe a non-NULL tgid_map then we also observe the correct
2502 * tgid_map_max.
2504 int *map = smp_load_acquire(&tgid_map);
2506 if (unlikely(!map || pid > tgid_map_max))
2507 return NULL;
2509 return &map[pid];
2512 int trace_find_tgid(int pid)
2514 int *ptr = trace_find_tgid_ptr(pid);
2516 return ptr ? *ptr : 0;
2519 static int trace_save_tgid(struct task_struct *tsk)
2523 /* treat recording of idle task as a success */
2524 if (!tsk->pid)
2525 return 1;
2527 ptr = trace_find_tgid_ptr(tsk->pid);
2528 if (!ptr)
2529 return 0;
2531 *ptr = tsk->tgid;
2532 return 1;
2535 static bool tracing_record_taskinfo_skip(int flags)
2537 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2538 return true;
2539 if (!__this_cpu_read(trace_taskinfo_save))
2540 return true;
2541 return false;
2545 * tracing_record_taskinfo - record the task info of a task
2547 * @task: task to record
2548 * @flags: TRACE_RECORD_CMDLINE for recording comm
2549 * TRACE_RECORD_TGID for recording tgid
2551 void tracing_record_taskinfo(struct task_struct *task, int flags)
2555 if (tracing_record_taskinfo_skip(flags))
2556 return;
2559 * Record as much task information as possible. If some fail, continue
2560 * to try to record the others.
2562 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2563 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2565 /* If recording any information failed, retry again soon. */
2566 if (!done)
2567 return;
2569 __this_cpu_write(trace_taskinfo_save, false);
2573 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2575 * @prev: previous task during sched_switch
2576 * @next: next task during sched_switch
2577 * @flags: TRACE_RECORD_CMDLINE for recording comm
2578 * TRACE_RECORD_TGID for recording tgid
2580 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2581 struct task_struct *next, int flags)
2585 if (tracing_record_taskinfo_skip(flags))
2586 return;
2589 * Record as much task information as possible. If some fail, continue
2590 * to try to record the others.
2592 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2593 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2594 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2595 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2597 /* If recording any information failed, retry again soon. */
2598 if (!done)
2599 return;
2601 __this_cpu_write(trace_taskinfo_save, false);
2604 /* Helpers to record a specific task information */
2605 void tracing_record_cmdline(struct task_struct *task)
2607 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2610 void tracing_record_tgid(struct task_struct *task)
2612 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2616 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2617 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2618 * simplifies those functions and keeps them in sync.
2620 enum print_line_t trace_handle_return(struct trace_seq *s)
2622 return trace_seq_has_overflowed(s) ?
2623 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2625 EXPORT_SYMBOL_GPL(trace_handle_return);
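/*
 * A minimal usage sketch (the "foo" names below are illustrative
 * only): an event's trace() callback writes into iter->seq and lets
 * trace_handle_return() map a possible overflow to
 * TRACE_TYPE_PARTIAL_LINE:
 *
 *	static enum print_line_t foo_trace(struct trace_iterator *iter,
 *					   int flags, struct trace_event *event)
 *	{
 *		trace_seq_printf(&iter->seq, "foo: %d\n", 1);
 *		return trace_handle_return(&iter->seq);
 *	}
 */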
2627 static unsigned short migration_disable_value(void)
2629 #if defined(CONFIG_SMP)
2630 return current->migration_disabled;
2631 #else
2632 return 0;
2633 #endif
2636 unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
2638 unsigned int trace_flags = irqs_status;
2641 pc = preempt_count();
2643 if (pc & NMI_MASK)
2644 trace_flags |= TRACE_FLAG_NMI;
2645 if (pc & HARDIRQ_MASK)
2646 trace_flags |= TRACE_FLAG_HARDIRQ;
2647 if (in_serving_softirq())
2648 trace_flags |= TRACE_FLAG_SOFTIRQ;
2649 if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
2650 trace_flags |= TRACE_FLAG_BH_OFF;
2652 if (tif_need_resched())
2653 trace_flags |= TRACE_FLAG_NEED_RESCHED;
2654 if (test_preempt_need_resched())
2655 trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
2656 return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) |
2657 (min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
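/*
 * Layout of the trace_ctx word built above (a summary of the packing,
 * not a separate definition):
 *
 *	bits  0.. 3: preemption disabled depth (clamped to 15)
 *	bits  4.. 7: migration disabled depth (clamped to 15)
 *	bits  8..15: unused here
 *	bits 16..31: TRACE_FLAG_* bits (IRQS_OFF, NMI, HARDIRQ, ...)
 */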
2660 struct ring_buffer_event *
2661 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2664 unsigned int trace_ctx)
2666 return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx);
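/*
 * Typical caller pattern (a sketch mirroring trace_function() below,
 * not an additional API): reserve an event, fill it in, then commit.
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
 *					  tracing_gen_ctx());
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	trace_buffer_unlock_commit_nostack(buffer, event);
 */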
2669 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2670 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2671 static int trace_buffered_event_ref;
2674 * trace_buffered_event_enable - enable buffering events
2676 * When events are being filtered, it is quicker to use a temporary
2677 * buffer to write the event data into if there's a likely chance
2678 * that it will not be committed. The discard of the ring buffer
2679 * is not as fast as committing, and is much slower than copying
2680 * into the temp buffer.
2682 * When an event is to be filtered, allocate per cpu buffers to
2683 * write the event data into, and if the event is filtered and discarded
2684 * it is simply dropped, otherwise, the entire data is to be committed
2685 * in one shot.
2687 void trace_buffered_event_enable(void)
2689 struct ring_buffer_event *event;
2693 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2695 if (trace_buffered_event_ref++)
2696 return;
2698 for_each_tracing_cpu(cpu) {
2699 page = alloc_pages_node(cpu_to_node(cpu),
2700 GFP_KERNEL | __GFP_NORETRY, 0);
2701 if (!page)
2702 goto failed;
2704 event = page_address(page);
2705 memset(event, 0, sizeof(*event));
2707 per_cpu(trace_buffered_event, cpu) = event;
2709 preempt_disable();
2710 if (cpu == smp_processor_id() &&
2711 __this_cpu_read(trace_buffered_event) !=
2712 per_cpu(trace_buffered_event, cpu))
2713 WARN_ON_ONCE(1);
2714 preempt_enable();
2717 return;
2718 failed:
2719 trace_buffered_event_disable();
2722 static void enable_trace_buffered_event(void *data)
2724 /* Probably not needed, but do it anyway */
2726 this_cpu_dec(trace_buffered_event_cnt);
2729 static void disable_trace_buffered_event(void *data)
2731 this_cpu_inc(trace_buffered_event_cnt);
2735 * trace_buffered_event_disable - disable buffering events
2737 * When a filter is removed, it is faster to not use the buffered
2738 * events, and to commit directly into the ring buffer. Free up
2739 * the temp buffers when there are no more users. This requires
2740 * special synchronization with current events.
2742 void trace_buffered_event_disable(void)
2746 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2748 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2749 return;
2751 if (--trace_buffered_event_ref)
2752 return;
2755 /* For each CPU, set the buffer as used. */
2756 smp_call_function_many(tracing_buffer_mask,
2757 disable_trace_buffered_event, NULL, 1);
2760 /* Wait for all current users to finish */
2761 synchronize_rcu();
2763 for_each_tracing_cpu(cpu) {
2764 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2765 per_cpu(trace_buffered_event, cpu) = NULL;
2768 * Make sure trace_buffered_event is NULL before clearing
2769 * trace_buffered_event_cnt.
2771 smp_wmb();
2774 /* Do the work on each cpu */
2775 smp_call_function_many(tracing_buffer_mask,
2776 enable_trace_buffered_event, NULL, 1);
2780 static struct trace_buffer *temp_buffer;
2782 struct ring_buffer_event *
2783 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2784 struct trace_event_file *trace_file,
2785 int type, unsigned long len,
2786 unsigned int trace_ctx)
2788 struct ring_buffer_event *entry;
2789 struct trace_array *tr = trace_file->tr;
2792 *current_rb = tr->array_buffer.buffer;
2794 if (!tr->no_filter_buffering_ref &&
2795 (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) {
2796 preempt_disable_notrace();
2798 * Filtering is on, so try to use the per cpu buffer first.
2799 * This buffer will simulate a ring_buffer_event,
2800 * where the type_len is zero and the array[0] will
2801 * hold the full length.
2802 * (see include/linux/ring_buffer.h for details on
2803 * how the ring_buffer_event is structured).
2805 * Using a temp buffer during filtering and copying it
2806 * on a matched filter is quicker than writing directly
2807 * into the ring buffer and then discarding it when
2808 * it doesn't match. That is because the discard
2809 * requires several atomic operations to get right.
2810 * Copying on match and doing nothing on a failed match
2811 * is still quicker than no copy on match, but having
2812 * to discard out of the ring buffer on a failed match.
2814 if ((entry = __this_cpu_read(trace_buffered_event))) {
2815 int max_len = PAGE_SIZE - struct_size(entry, array, 1);
2817 val = this_cpu_inc_return(trace_buffered_event_cnt);
2820 * Preemption is disabled, but interrupts and NMIs
2821 * can still come in now. If that happens after
2822 * the above increment, then it will have to go
2823 * back to the old method of allocating the event
2824 * on the ring buffer, and if the filter fails, it
2825 * will have to call ring_buffer_discard_commit()
2828 * Need to also check the unlikely case that the
2829 * length is bigger than the temp buffer size.
2830 * If that happens, then the reserve is pretty much
2831 * guaranteed to fail, as the ring buffer currently
2832 * only allows events less than a page. But that may
2833 * change in the future, so let the ring buffer reserve
2834 * handle the failure in that case.
2836 if (val == 1 && likely(len <= max_len)) {
2837 trace_event_setup(entry, type, trace_ctx);
2838 entry->array[0] = len;
2839 /* Return with preemption disabled */
2840 return entry;
2842 this_cpu_dec(trace_buffered_event_cnt);
2844 /* __trace_buffer_lock_reserve() disables preemption */
2845 preempt_enable_notrace();
2848 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2849 trace_ctx);
2851 * If tracing is off, but we have triggers enabled
2852 * we still need to look at the event data. Use the temp_buffer
2853 * to store the trace event for the trigger to use. It's recursive
2854 * safe and will not be recorded anywhere.
2856 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2857 *current_rb = temp_buffer;
2858 entry = __trace_buffer_lock_reserve(*current_rb, type, len,
2859 trace_ctx);
2861 return entry;
2863 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2865 static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock);
2866 static DEFINE_MUTEX(tracepoint_printk_mutex);
2868 static void output_printk(struct trace_event_buffer *fbuffer)
2870 struct trace_event_call *event_call;
2871 struct trace_event_file *file;
2872 struct trace_event *event;
2873 unsigned long flags;
2874 struct trace_iterator *iter = tracepoint_print_iter;
2876 /* We should never get here if iter is NULL */
2877 if (WARN_ON_ONCE(!iter))
2878 return;
2880 event_call = fbuffer->trace_file->event_call;
2881 if (!event_call || !event_call->event.funcs ||
2882 !event_call->event.funcs->trace)
2883 return;
2885 file = fbuffer->trace_file;
2886 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2887 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2888 !filter_match_preds(file->filter, fbuffer->entry)))
2889 return;
2891 event = &fbuffer->trace_file->event_call->event;
2893 raw_spin_lock_irqsave(&tracepoint_iter_lock, flags);
2894 trace_seq_init(&iter->seq);
2895 iter->ent = fbuffer->entry;
2896 event_call->event.funcs->trace(iter, 0, event);
2897 trace_seq_putc(&iter->seq, 0);
2898 printk("%s", iter->seq.buffer);
2900 raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2903 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2904 void *buffer, size_t *lenp,
2905 loff_t *ppos)
2907 int save_tracepoint_printk;
2908 int ret;
2910 mutex_lock(&tracepoint_printk_mutex);
2911 save_tracepoint_printk = tracepoint_printk;
2913 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2916 * This will force exiting early, as tracepoint_printk
2917 * is always zero when tracepoint_print_iter is not allocated
2919 if (!tracepoint_print_iter)
2920 tracepoint_printk = 0;
2922 if (save_tracepoint_printk == tracepoint_printk)
2923 goto out;
2925 if (tracepoint_printk)
2926 static_key_enable(&tracepoint_printk_key.key);
2928 static_key_disable(&tracepoint_printk_key.key);
2930 out:
2931 mutex_unlock(&tracepoint_printk_mutex);
2933 return ret;
2936 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2938 enum event_trigger_type tt = ETT_NONE;
2939 struct trace_event_file *file = fbuffer->trace_file;
2941 if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event,
2942 fbuffer->entry, &tt))
2943 goto discard;
2945 if (static_key_false(&tracepoint_printk_key.key))
2946 output_printk(fbuffer);
2948 if (static_branch_unlikely(&trace_event_exports_enabled))
2949 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2951 trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer,
2952 fbuffer->event, fbuffer->trace_ctx, fbuffer->regs);
2954 discard:
2955 if (tt)
2956 event_triggers_post_call(file, tt);
2959 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2962 * Skip 3:
2964 * trace_buffer_unlock_commit_regs()
2965 * trace_event_buffer_commit()
2966 * trace_event_raw_event_xxx()
2968 # define STACK_SKIP 3
2970 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2971 struct trace_buffer *buffer,
2972 struct ring_buffer_event *event,
2973 unsigned int trace_ctx,
2974 struct pt_regs *regs)
2976 __buffer_unlock_commit(buffer, event);
2979 * If regs is not set, then skip the necessary functions.
2980 * Note, we can still get here via blktrace, wakeup tracer
2981 * and mmiotrace, but that's ok if they lose a function or
2982 * two. They are not that meaningful.
2984 ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs);
2985 ftrace_trace_userstack(tr, buffer, trace_ctx);
2989 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2992 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2993 struct ring_buffer_event *event)
2995 __buffer_unlock_commit(buffer, event);
2999 trace_function(struct trace_array *tr, unsigned long ip, unsigned long
3000 parent_ip, unsigned int trace_ctx)
3002 struct trace_event_call *call = &event_function;
3003 struct trace_buffer *buffer = tr->array_buffer.buffer;
3004 struct ring_buffer_event *event;
3005 struct ftrace_entry *entry;
3007 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
3008 trace_ctx);
3009 if (!event)
3010 return;
3011 entry = ring_buffer_event_data(event);
3012 entry->ip = ip;
3013 entry->parent_ip = parent_ip;
3015 if (!call_filter_check_discard(call, entry, buffer, event)) {
3016 if (static_branch_unlikely(&trace_function_exports_enabled))
3017 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
3018 __buffer_unlock_commit(buffer, event);
3022 #ifdef CONFIG_STACKTRACE
3024 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
3025 #define FTRACE_KSTACK_NESTING 4
3027 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
3029 struct ftrace_stack {
3030 unsigned long calls[FTRACE_KSTACK_ENTRIES];
3034 struct ftrace_stacks {
3035 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
3038 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
3039 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
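/*
 * Example of why FTRACE_KSTACK_NESTING is 4: a stack trace taken in
 * normal context can be interrupted by a softirq, that softirq by an
 * irq, and the irq by an NMI. Each context gets its own ftrace_stack
 * slot (stackidx 0..3) on this CPU, so none of them overwrites the
 * entries of the context it interrupted.
 */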
3041 static void __ftrace_trace_stack(struct trace_buffer *buffer,
3042 unsigned int trace_ctx,
3043 int skip, struct pt_regs *regs)
3045 struct trace_event_call *call = &event_kernel_stack;
3046 struct ring_buffer_event *event;
3047 unsigned int size, nr_entries;
3048 struct ftrace_stack *fstack;
3049 struct stack_entry *entry;
3053 * Add one, for this function and the call to save_stack_trace().
3054 * If regs is set, then these functions will not be in the way.
3056 #ifndef CONFIG_UNWINDER_ORC
3057 if (!regs)
3058 skip++;
3059 #endif
3061 preempt_disable_notrace();
3063 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3065 /* This should never happen. If it does, yell once and skip */
3066 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3067 goto out;
3070 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
3071 * interrupt will either see the value pre increment or post
3072 * increment. If the interrupt happens pre increment it will have
3073 * restored the counter when it returns. We just need a barrier to
3074 * keep gcc from moving things around.
3076 barrier();
3078 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
3079 size = ARRAY_SIZE(fstack->calls);
3081 if (regs) {
3082 nr_entries = stack_trace_save_regs(regs, fstack->calls,
3083 size, skip);
3084 } else {
3085 nr_entries = stack_trace_save(fstack->calls, size, skip);
3088 size = nr_entries * sizeof(unsigned long);
3089 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
3090 (sizeof(*entry) - sizeof(entry->caller)) + size,
3091 trace_ctx);
3092 if (!event)
3093 goto out;
3094 entry = ring_buffer_event_data(event);
3096 memcpy(&entry->caller, fstack->calls, size);
3097 entry->size = nr_entries;
3099 if (!call_filter_check_discard(call, entry, buffer, event))
3100 __buffer_unlock_commit(buffer, event);
3102 out:
3103 /* Again, don't let gcc optimize things here */
3104 barrier();
3105 __this_cpu_dec(ftrace_stack_reserve);
3106 preempt_enable_notrace();
3110 static inline void ftrace_trace_stack(struct trace_array *tr,
3111 struct trace_buffer *buffer,
3112 unsigned int trace_ctx,
3113 int skip, struct pt_regs *regs)
3115 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3116 return;
3118 __ftrace_trace_stack(buffer, trace_ctx, skip, regs);
3121 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
3124 struct trace_buffer *buffer = tr->array_buffer.buffer;
3126 if (rcu_is_watching()) {
3127 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3128 return;
3132 * When an NMI triggers, RCU is enabled via ct_nmi_enter(),
3133 * but if the above rcu_is_watching() failed, then the NMI
3134 * triggered someplace critical, and ct_irq_enter() should
3135 * not be called from NMI.
3137 if (unlikely(in_nmi()))
3138 return;
3140 ct_irq_enter_irqson();
3141 __ftrace_trace_stack(buffer, trace_ctx, skip, NULL);
3142 ct_irq_exit_irqson();
3146 * trace_dump_stack - record a stack back trace in the trace buffer
3147 * @skip: Number of functions to skip (helper handlers)
3149 void trace_dump_stack(int skip)
3151 if (tracing_disabled || tracing_selftest_running)
3152 return;
3154 #ifndef CONFIG_UNWINDER_ORC
3155 /* Skip 1 to skip this function. */
3156 skip++;
3157 #endif
3158 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3159 tracing_gen_ctx(), skip, NULL);
3161 EXPORT_SYMBOL_GPL(trace_dump_stack);
3163 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3164 static DEFINE_PER_CPU(int, user_stack_count);
3167 ftrace_trace_userstack(struct trace_array *tr,
3168 struct trace_buffer *buffer, unsigned int trace_ctx)
3170 struct trace_event_call *call = &event_user_stack;
3171 struct ring_buffer_event *event;
3172 struct userstack_entry *entry;
3174 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3175 return;
3178 * NMIs can not handle page faults, even with fix ups.
3179 * Saving the user stack can (and often does) fault.
3181 if (unlikely(in_nmi()))
3182 return;
3185 * prevent recursion, since the user stack tracing may
3186 * trigger other kernel events.
3188 preempt_disable();
3189 if (__this_cpu_read(user_stack_count))
3190 goto out;
3192 __this_cpu_inc(user_stack_count);
3194 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3195 sizeof(*entry), trace_ctx);
3196 if (!event)
3197 goto out_drop_count;
3198 entry = ring_buffer_event_data(event);
3200 entry->tgid = current->tgid;
3201 memset(&entry->caller, 0, sizeof(entry->caller));
3203 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3204 if (!call_filter_check_discard(call, entry, buffer, event))
3205 __buffer_unlock_commit(buffer, event);
3207 out_drop_count:
3208 __this_cpu_dec(user_stack_count);
3209 out:
3210 preempt_enable();
3212 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3213 static void ftrace_trace_userstack(struct trace_array *tr,
3214 struct trace_buffer *buffer,
3215 unsigned int trace_ctx)
3218 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3220 #endif /* CONFIG_STACKTRACE */
3223 func_repeats_set_delta_ts(struct func_repeats_entry *entry,
3224 unsigned long long delta)
3226 entry->bottom_delta_ts = delta & U32_MAX;
3227 entry->top_delta_ts = (delta >> 32);
3230 void trace_last_func_repeats(struct trace_array *tr,
3231 struct trace_func_repeats *last_info,
3232 unsigned int trace_ctx)
3234 struct trace_buffer *buffer = tr->array_buffer.buffer;
3235 struct func_repeats_entry *entry;
3236 struct ring_buffer_event *event;
3239 event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS,
3240 sizeof(*entry), trace_ctx);
3241 if (!event)
3242 return;
3244 delta = ring_buffer_event_time_stamp(buffer, event) -
3245 last_info->ts_last_call;
3247 entry = ring_buffer_event_data(event);
3248 entry->ip = last_info->ip;
3249 entry->parent_ip = last_info->parent_ip;
3250 entry->count = last_info->count;
3251 func_repeats_set_delta_ts(entry, delta);
3253 __buffer_unlock_commit(buffer, event);
3256 /* created for use with alloc_percpu */
3257 struct trace_buffer_struct {
3258 int nesting;
3259 char buffer[4][TRACE_BUF_SIZE];
3262 static struct trace_buffer_struct __percpu *trace_percpu_buffer;
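/*
 * Four buffers for the same reason as FTRACE_KSTACK_NESTING above:
 * normal, softirq, irq and NMI context may each hold one of the
 * per-cpu printk buffers on the same CPU at the same time.
 */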
3265 * This allows for lockless recording. If we're nested too deeply, then
3266 * this returns NULL.
3268 static char *get_trace_buf(void)
3270 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3272 if (!trace_percpu_buffer || buffer->nesting >= 4)
3273 return NULL;
3275 this_cpu_inc(buffer->nesting);
3277 /* Interrupts must see nesting incremented before we use the buffer */
3278 barrier();
3279 return &buffer->buffer[buffer->nesting - 1][0];
3282 static void put_trace_buf(void)
3284 /* Don't let the decrement of nesting leak before this */
3285 barrier();
3286 this_cpu_dec(trace_percpu_buffer->nesting);
3289 static int alloc_percpu_trace_buffer(void)
3291 struct trace_buffer_struct __percpu *buffers;
3293 if (trace_percpu_buffer)
3294 return 0;
3296 buffers = alloc_percpu(struct trace_buffer_struct);
3297 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3298 return -ENOMEM;
3300 trace_percpu_buffer = buffers;
3301 return 0;
3304 static int buffers_allocated;
3306 void trace_printk_init_buffers(void)
3308 if (buffers_allocated)
3309 return;
3311 if (alloc_percpu_trace_buffer())
3312 return;
3314 /* trace_printk() is for debug use only. Don't use it in production. */
3317 pr_warn("**********************************************************\n");
3318 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3320 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3322 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3323 pr_warn("** unsafe for production use. **\n");
3325 pr_warn("** If you see this message and you are not debugging **\n");
3326 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3328 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3329 pr_warn("**********************************************************\n");
3331 /* Expand the buffers to set size */
3332 tracing_update_buffers();
3334 buffers_allocated = 1;
3337 * trace_printk_init_buffers() can be called by modules.
3338 * If that happens, then we need to start cmdline recording
3339 * directly here. If the global_trace.buffer is already
3340 * allocated here, then this was called by module code.
3342 if (global_trace.array_buffer.buffer)
3343 tracing_start_cmdline_record();
3345 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
3347 void trace_printk_start_comm(void)
3349 /* Start tracing comms if trace printk is set */
3350 if (!buffers_allocated)
3351 return;
3352 tracing_start_cmdline_record();
3355 static void trace_printk_start_stop_comm(int enabled)
3357 if (!buffers_allocated)
3358 return;
3360 if (enabled)
3361 tracing_start_cmdline_record();
3362 else
3363 tracing_stop_cmdline_record();
3367 * trace_vbprintk - write binary msg to tracing buffer
3368 * @ip: The address of the caller
3369 * @fmt: The string format to write to the buffer
3370 * @args: Arguments for @fmt
3372 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3374 struct trace_event_call *call = &event_bprint;
3375 struct ring_buffer_event *event;
3376 struct trace_buffer *buffer;
3377 struct trace_array *tr = &global_trace;
3378 struct bprint_entry *entry;
3379 unsigned int trace_ctx;
3383 if (unlikely(tracing_selftest_running || tracing_disabled))
3384 return 0;
3386 /* Don't pollute graph traces with trace_vprintk internals */
3387 pause_graph_tracing();
3389 trace_ctx = tracing_gen_ctx();
3390 preempt_disable_notrace();
3392 tbuffer = get_trace_buf();
3393 if (!tbuffer) {
3394 len = 0;
3395 goto out_nobuffer;
3398 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3400 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3401 goto out_put;
3403 size = sizeof(*entry) + sizeof(u32) * len;
3404 buffer = tr->array_buffer.buffer;
3405 ring_buffer_nest_start(buffer);
3406 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3407 trace_ctx);
3408 if (!event)
3409 goto out;
3410 entry = ring_buffer_event_data(event);
3411 entry->ip = ip;
3412 entry->fmt = fmt;
3414 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3415 if (!call_filter_check_discard(call, entry, buffer, event)) {
3416 __buffer_unlock_commit(buffer, event);
3417 ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL);
3420 out:
3421 ring_buffer_nest_end(buffer);
3422 out_put:
3423 put_trace_buf();
3425 out_nobuffer:
3426 preempt_enable_notrace();
3427 unpause_graph_tracing();
3429 return len;
3431 EXPORT_SYMBOL_GPL(trace_vbprintk);
3435 __trace_array_vprintk(struct trace_buffer *buffer,
3436 unsigned long ip, const char *fmt, va_list args)
3438 struct trace_event_call *call = &event_print;
3439 struct ring_buffer_event *event;
3441 struct print_entry *entry;
3442 unsigned int trace_ctx;
3445 if (tracing_disabled || tracing_selftest_running)
3446 return 0;
3448 /* Don't pollute graph traces with trace_vprintk internals */
3449 pause_graph_tracing();
3451 trace_ctx = tracing_gen_ctx();
3452 preempt_disable_notrace();
3455 tbuffer = get_trace_buf();
3456 if (!tbuffer) {
3457 len = 0;
3458 goto out_nobuffer;
3461 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3463 size = sizeof(*entry) + len + 1;
3464 ring_buffer_nest_start(buffer);
3465 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3466 trace_ctx);
3467 if (!event)
3468 goto out;
3469 entry = ring_buffer_event_data(event);
3470 entry->ip = ip;
3472 memcpy(&entry->buf, tbuffer, len + 1);
3473 if (!call_filter_check_discard(call, entry, buffer, event)) {
3474 __buffer_unlock_commit(buffer, event);
3475 ftrace_trace_stack(&global_trace, buffer, trace_ctx, 6, NULL);
3478 out:
3479 ring_buffer_nest_end(buffer);
3480 put_trace_buf();
3482 out_nobuffer:
3483 preempt_enable_notrace();
3484 unpause_graph_tracing();
3486 return len;
3490 int trace_array_vprintk(struct trace_array *tr,
3491 unsigned long ip, const char *fmt, va_list args)
3493 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3497 * trace_array_printk - Print a message to a specific instance
3498 * @tr: The instance trace_array descriptor
3499 * @ip: The instruction pointer that this is called from.
3500 * @fmt: The format to print (printf format)
3502 * If a subsystem sets up its own instance, they have the right to
3503 * printk strings into their tracing instance buffer using this
3504 * function. Note, this function will not write into the top level
3505 * buffer (use trace_printk() for that), as writing into the top level
3506 * buffer should only have events that can be individually disabled.
3507 * trace_printk() is only used for debugging a kernel, and should not
3508 * be ever incorporated in normal use.
3510 * trace_array_printk() can be used, as it will not add noise to the
3511 * top level tracing buffer.
3513 * Note, trace_array_init_printk() must be called on @tr before this
3517 int trace_array_printk(struct trace_array *tr,
3518 unsigned long ip, const char *fmt, ...)
3523 if (!tr)
3524 return -ENOENT;
3526 /* This is only allowed for created instances */
3527 if (tr == &global_trace)
3528 return 0;
3530 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3531 return 0;
3533 va_start(ap, fmt);
3534 ret = trace_array_vprintk(tr, ip, fmt, ap);
3535 va_end(ap);
3536 return ret;
3538 EXPORT_SYMBOL_GPL(trace_array_printk);
3541 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3542 * @tr: The trace array to initialize the buffers for
3544 * As trace_array_printk() only writes into instances, they are OK to
3545 * have in the kernel (unlike trace_printk()). This needs to be called
3546 * before trace_array_printk() can be used on a trace_array.
3548 int trace_array_init_printk(struct trace_array *tr)
3550 if (!tr)
3551 return -ENOENT;
3553 /* This is only allowed for created instances */
3554 if (tr == &global_trace)
3555 return -EINVAL;
3557 return alloc_percpu_trace_buffer();
3559 EXPORT_SYMBOL_GPL(trace_array_init_printk);
3562 int trace_array_printk_buf(struct trace_buffer *buffer,
3563 unsigned long ip, const char *fmt, ...)
3568 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3572 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3578 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3580 return trace_array_vprintk(&global_trace, ip, fmt, args);
3582 EXPORT_SYMBOL_GPL(trace_vprintk);
3584 static void trace_iterator_increment(struct trace_iterator *iter)
3586 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3588 iter->idx++;
3589 if (buf_iter)
3590 ring_buffer_iter_advance(buf_iter);
3593 static struct trace_entry *
3594 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3595 unsigned long *lost_events)
3597 struct ring_buffer_event *event;
3598 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3600 if (buf_iter) {
3601 event = ring_buffer_iter_peek(buf_iter, ts);
3602 if (lost_events)
3603 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3604 (unsigned long)-1 : 0;
3605 } else {
3606 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3607 lost_events);
3610 if (event) {
3611 iter->ent_size = ring_buffer_event_length(event);
3612 return ring_buffer_event_data(event);
3615 iter->ent_size = 0;
3616 return NULL;
3618 static struct trace_entry *
3619 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3620 unsigned long *missing_events, u64 *ent_ts)
3622 struct trace_buffer *buffer = iter->array_buffer->buffer;
3623 struct trace_entry *ent, *next = NULL;
3624 unsigned long lost_events = 0, next_lost = 0;
3625 int cpu_file = iter->cpu_file;
3626 u64 next_ts = 0, ts;
3632 * If we are in a per_cpu trace file, don't bother by iterating over
3633 * all cpu and peek directly.
3635 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3636 if (ring_buffer_empty_cpu(buffer, cpu_file))
3637 return NULL;
3638 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3639 if (ent_cpu)
3640 *ent_cpu = cpu_file;
3642 return ent;
3645 for_each_tracing_cpu(cpu) {
3647 if (ring_buffer_empty_cpu(buffer, cpu))
3648 continue;
3650 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3653 * Pick the entry with the smallest timestamp:
3655 if (ent && (!next || ts < next_ts)) {
3656 next = ent;
3657 next_cpu = cpu;
3658 next_ts = ts;
3659 next_lost = lost_events;
3660 next_size = iter->ent_size;
3664 iter->ent_size = next_size;
3666 if (ent_cpu)
3667 *ent_cpu = next_cpu;
3669 if (ent_ts)
3670 *ent_ts = next_ts;
3672 if (missing_events)
3673 *missing_events = next_lost;
3675 return next;
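/*
 * In effect this is a k-way merge: each per-cpu buffer is already
 * ordered by timestamp, so repeatedly peeking every CPU and taking
 * the smallest ts yields one globally ordered stream, e.g.
 *
 *	cpu0: 100 130, cpu1: 110 120  =>  100 110 120 130
 */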
3678 #define STATIC_FMT_BUF_SIZE 128
3679 static char static_fmt_buf[STATIC_FMT_BUF_SIZE];
3681 static char *trace_iter_expand_format(struct trace_iterator *iter)
3686 * iter->tr is NULL when used with tp_printk, which makes
3687 * this get called where it is not safe to call krealloc().
3689 if (!iter->tr || iter->fmt == static_fmt_buf)
3690 return NULL;
3692 tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE,
3693 GFP_KERNEL);
3694 if (tmp) {
3695 iter->fmt_size += STATIC_FMT_BUF_SIZE;
3696 iter->fmt = tmp;
3699 return tmp;
3702 /* Returns true if the string is safe to dereference from an event */
3703 static bool trace_safe_str(struct trace_iterator *iter, const char *str,
3706 unsigned long addr = (unsigned long)str;
3707 struct trace_event *trace_event;
3708 struct trace_event_call *event;
3710 /* Ignore strings with no length */
3711 if (star && !len)
3712 return true;
3714 /* OK if part of the event data */
3715 if ((addr >= (unsigned long)iter->ent) &&
3716 (addr < (unsigned long)iter->ent + iter->ent_size))
3717 return true;
3719 /* OK if part of the temp seq buffer */
3720 if ((addr >= (unsigned long)iter->tmp_seq.buffer) &&
3721 (addr < (unsigned long)iter->tmp_seq.buffer + PAGE_SIZE))
3722 return true;
3724 /* Core rodata can not be freed */
3725 if (is_kernel_rodata(addr))
3726 return true;
3728 if (trace_is_tracepoint_string(str))
3729 return true;
3732 * Now this could be a module event, referencing core module
3733 * data, which is OK.
3738 trace_event = ftrace_find_event(iter->ent->type);
3739 if (!trace_event)
3740 return false;
3742 event = container_of(trace_event, struct trace_event_call, event);
3743 if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module)
3744 return false;
3746 /* Would rather have rodata, but this will suffice */
3747 if (within_module_core(addr, event->module))
3748 return true;
3750 return false;
3753 static const char *show_buffer(struct trace_seq *s)
3755 struct seq_buf *seq = &s->seq;
3757 seq_buf_terminate(seq);
3762 static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
3764 static int test_can_verify_check(const char *fmt, ...)
3766 char buf[16];
3767 va_list ap;
3768 int ret;
3771 * The verifier depends on vsnprintf() modifying the va_list
3772 * passed to it, where it is sent as a reference. Some architectures
3773 * (like x86_32) pass it by value, which means that vsnprintf()
3774 * does not modify the va_list passed to it, and the verifier
3775 * would then need to be able to understand all the values that
3776 * vsnprintf can use. If it is passed by value, then the verifier
3777 * is disabled.
3779 va_start(ap, fmt);
3780 vsnprintf(buf, 16, "%d", ap);
3781 ret = va_arg(ap, int);
3782 va_end(ap);
3784 return ret;
3787 static void test_can_verify(void)
3789 if (!test_can_verify_check("%d %d", 0, 1)) {
3790 pr_info("trace event string verifier disabled\n");
3791 static_branch_inc(&trace_no_verify);
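/*
 * How the probe above works: vsnprintf("%d", ap) should consume the
 * first variadic argument (0). If the architecture passes va_list by
 * reference, the following va_arg() then returns the second argument
 * (1) and the verifier stays enabled. If va_list is passed by value,
 * va_arg() re-reads the first argument (0) and the verifier is
 * switched off.
 */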
3796 * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
3797 * @iter: The iterator that holds the seq buffer and the event being printed
3798 * @fmt: The format used to print the event
3799 * @ap: The va_list holding the data to print from @fmt.
3801 * This writes the data into the @iter->seq buffer using the data from
3802 * @fmt and @ap. If the format has a %s, then the source of the string
3803 * is examined to make sure it is safe to print, otherwise it will
3804 * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
3807 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
3810 const char *p = fmt;
3814 if (WARN_ON_ONCE(!fmt))
3815 return;
3817 if (static_branch_unlikely(&trace_no_verify))
3818 goto print;
3820 /* Don't bother checking when doing a ftrace_dump() */
3821 if (iter->fmt == static_fmt_buf)
3822 goto print;
3830 /* We only care about %s and variants */
3831 for (i = 0; p[i]; i++) {
3832 if (i + 1 >= iter->fmt_size) {
3834 * If we can't expand the copy buffer,
3837 if (!trace_iter_expand_format(iter))
3841 if (p[i] == '\\' && p[i+1]) {
3846 /* Need to test cases like %08.*s */
3847 for (j = 1; p[i+j]; j++) {
3848 if (isdigit(p[i+j]) ||
3851 if (p[i+j] == '*') {
3863 /* If no %s found then just print normally */
3867 /* Copy up to the %s, and print that */
3868 strncpy(iter->fmt, p, i);
3869 iter->fmt[i] = '\0';
3870 trace_seq_vprintf(&iter->seq, iter->fmt, ap);
3873 * If iter->seq is full, the above call no longer guarantees
3874 * that ap is in sync with fmt processing, and further calls
3875 * to va_arg() can return wrong positional arguments.
3877 * Ensure that ap is no longer used in this case.
3879 if (iter->seq.full) {
3885 len = va_arg(ap, int);
3887 /* The ap now points to the string data of the %s */
3888 str = va_arg(ap, const char *);
3891 * If you hit this warning, it is likely that the
3892 * trace event in question used %s on a string that
3893 * was saved at the time of the event, but may not be
3894 * around when the trace is read. Use __string(),
3895 * __assign_str() and __get_str() helpers in the TRACE_EVENT()
3896 * instead. See samples/trace_events/trace-events-sample.h
3899 if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
3900 "fmt: '%s' current_buffer: '%s'",
3901 fmt, show_buffer(&iter->seq))) {
3904 /* Try to safely read the string */
3906 if (len + 1 > iter->fmt_size)
3907 len = iter->fmt_size - 1;
3910 ret = copy_from_kernel_nofault(iter->fmt, str, len);
3914 ret = strncpy_from_kernel_nofault(iter->fmt, str,
3918 trace_seq_printf(&iter->seq, "(0x%px)", str);
3920 trace_seq_printf(&iter->seq, "(0x%px:%s)",
3922 str = "[UNSAFE-MEMORY]";
3923 strcpy(iter->fmt, "%s");
3925 strncpy(iter->fmt, p + i, j + 1);
3926 iter->fmt[j+1] = '\0';
3929 trace_seq_printf(&iter->seq, iter->fmt, len, str);
3931 trace_seq_printf(&iter->seq, iter->fmt, str);
3935 print:
3936 if (*p)
3937 trace_seq_vprintf(&iter->seq, p, ap);
3940 const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
3942 const char *p, *new_fmt;
3945 if (WARN_ON_ONCE(!fmt))
3946 return fmt;
3948 if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR)
3949 return fmt;
3952 new_fmt = q = iter->fmt;
3954 if (unlikely(q - new_fmt + 3 > iter->fmt_size)) {
3955 if (!trace_iter_expand_format(iter))
3956 return fmt;
3958 q += iter->fmt - new_fmt;
3959 new_fmt = iter->fmt;
3964 /* Replace %p with %px */
3968 } else if (p[0] == 'p' && !isalnum(p[1])) {
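/*
 * Example: with the hash-ptr trace option cleared, a format such as
 * "comm=%s ptr=%p" is rewritten into iter->fmt as "comm=%s ptr=%px",
 * so the event prints the raw address instead of the hashed value.
 */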
3979 #define STATIC_TEMP_BUF_SIZE 128
3980 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3982 /* Find the next real entry, without updating the iterator itself */
3983 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3984 int *ent_cpu, u64 *ent_ts)
3986 /* __find_next_entry will reset ent_size */
3987 int ent_size = iter->ent_size;
3988 struct trace_entry *entry;
3991 * If called from ftrace_dump(), then the iter->temp buffer
3992 * will be the static_temp_buf and not created from kmalloc.
3993 * If the entry size is greater than the buffer, we can
3994 * not save it. Just return NULL in that case. This is only
3995 * used to add markers when two consecutive events' time
3996 * stamps have a large delta. See trace_print_lat_context()
3998 if (iter->temp == static_temp_buf &&
3999 STATIC_TEMP_BUF_SIZE < ent_size)
4000 return NULL;
4003 * The __find_next_entry() may call peek_next_entry(), which may
4004 * call ring_buffer_peek() that may make the contents of iter->ent
4005 * undefined. Need to copy iter->ent now.
4007 if (iter->ent && iter->ent != iter->temp) {
4008 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
4009 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
4011 temp = kmalloc(iter->ent_size, GFP_KERNEL);
4012 if (!temp)
4013 return NULL;
4014 kfree(iter->temp);
4015 iter->temp = temp;
4016 iter->temp_size = iter->ent_size;
4018 memcpy(iter->temp, iter->ent, iter->ent_size);
4019 iter->ent = iter->temp;
4021 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
4022 /* Put back the original ent_size */
4023 iter->ent_size = ent_size;
4028 /* Find the next real entry, and increment the iterator to the next entry */
4029 void *trace_find_next_entry_inc(struct trace_iterator *iter)
4031 iter->ent = __find_next_entry(iter, &iter->cpu,
4032 &iter->lost_events, &iter->ts);
4034 if (iter->ent)
4035 trace_iterator_increment(iter);
4037 return iter->ent ? iter : NULL;
4040 static void trace_consume(struct trace_iterator *iter)
4042 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
4043 &iter->lost_events);
4046 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
4048 struct trace_iterator *iter = m->private;
4052 WARN_ON_ONCE(iter->leftover);
4056 /* can't go backwards */
4057 if (iter->idx > i)
4058 return NULL;
4060 if (iter->idx < 0)
4061 ent = trace_find_next_entry_inc(iter);
4062 else
4063 ent = iter;
4065 while (ent && iter->idx < i)
4066 ent = trace_find_next_entry_inc(iter);
4073 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
4075 struct ring_buffer_iter *buf_iter;
4076 unsigned long entries = 0;
4079 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
4081 buf_iter = trace_buffer_iter(iter, cpu);
4082 if (!buf_iter)
4083 return;
4085 ring_buffer_iter_reset(buf_iter);
4088 * We could have the case with the max latency tracers
4089 * that a reset never took place on a cpu. This is evident
4090 * by the timestamp being before the start of the buffer.
4092 while (ring_buffer_iter_peek(buf_iter, &ts)) {
4093 if (ts >= iter->array_buffer->time_start)
4094 break;
4095 entries++;
4096 ring_buffer_iter_advance(buf_iter);
4099 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
4103 * The current tracer is copied to avoid a global locking
4104 * all around.
4106 static void *s_start(struct seq_file *m, loff_t *pos)
4108 struct trace_iterator *iter = m->private;
4109 struct trace_array *tr = iter->tr;
4110 int cpu_file = iter->cpu_file;
4116 * copy the tracer to avoid using a global lock all around.
4117 * iter->trace is a copy of current_trace, the pointer to the
4118 * name may be used instead of a strcmp(), as iter->trace->name
4119 * will point to the same string as current_trace->name.
4121 mutex_lock(&trace_types_lock);
4122 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
4123 *iter->trace = *tr->current_trace;
4124 mutex_unlock(&trace_types_lock);
4126 #ifdef CONFIG_TRACER_MAX_TRACE
4127 if (iter->snapshot && iter->trace->use_max_tr)
4128 return ERR_PTR(-EBUSY);
4131 if (*pos != iter->pos) {
4136 if (cpu_file == RING_BUFFER_ALL_CPUS) {
4137 for_each_tracing_cpu(cpu)
4138 tracing_iter_reset(iter, cpu);
4140 tracing_iter_reset(iter, cpu_file);
4143 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
4148 * If we overflowed the seq_file before, then we want
4149 * to just reuse the trace_seq buffer again.
4155 p = s_next(m, p, &l);
4159 trace_event_read_lock();
4160 trace_access_lock(cpu_file);
4164 static void s_stop(struct seq_file *m, void *p)
4166 struct trace_iterator *iter = m->private;
4168 #ifdef CONFIG_TRACER_MAX_TRACE
4169 if (iter->snapshot && iter->trace->use_max_tr)
4170 return;
4173 trace_access_unlock(iter->cpu_file);
4174 trace_event_read_unlock();
4178 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
4179 unsigned long *entries, int cpu)
4181 unsigned long count;
4183 count = ring_buffer_entries_cpu(buf->buffer, cpu);
4185 * If this buffer has skipped entries, then we hold all
4186 * entries for the trace and we need to ignore the
4187 * ones before the time stamp.
4189 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
4190 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
4191 /* total is the same as the entries */
4195 ring_buffer_overrun_cpu(buf->buffer, cpu);
4200 get_total_entries(struct array_buffer *buf,
4201 unsigned long *total, unsigned long *entries)
4209 for_each_tracing_cpu(cpu) {
4210 get_total_entries_cpu(buf, &t, &e, cpu);
4216 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
4218 unsigned long total, entries;
4223 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
4225 return total;
4228 unsigned long trace_total_entries(struct trace_array *tr)
4230 unsigned long total, entries;
4235 get_total_entries(&tr->array_buffer, &total, &entries);
4237 return total;
4240 static void print_lat_help_header(struct seq_file *m)
4242 seq_puts(m, "# _------=> CPU# \n"
4243 "# / _-----=> irqs-off/BH-disabled\n"
4244 "# | / _----=> need-resched \n"
4245 "# || / _---=> hardirq/softirq \n"
4246 "# ||| / _--=> preempt-depth \n"
4247 "# |||| / _-=> migrate-disable \n"
4248 "# ||||| / delay \n"
4249 "# cmd pid |||||| time | caller \n"
4250 "# \\ / |||||| \\ | / \n");
4253 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
4255 unsigned long total;
4256 unsigned long entries;
4258 get_total_entries(buf, &total, &entries);
4259 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
4260 entries, total, num_online_cpus());
4264 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
4267 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4269 print_event_info(buf, m);
4271 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
4272 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
4275 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
4278 bool tgid = flags & TRACE_ITER_RECORD_TGID;
4279 static const char space[] = " ";
4280 int prec = tgid ? 12 : 2;
4282 print_event_info(buf, m);
4284 seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space);
4285 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
4286 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
4287 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
4288 seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space);
4289 seq_printf(m, "# %.*s|||| / delay\n", prec, space);
4290 seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID ");
4291 seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | ");
4295 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
4297 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
4298 struct array_buffer *buf = iter->array_buffer;
4299 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
4300 struct tracer *type = iter->trace;
4301 unsigned long entries;
4302 unsigned long total;
4303 const char *name = type->name;
4305 get_total_entries(buf, &total, &entries);
4307 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
4308 name, UTS_RELEASE);
4309 seq_puts(m, "# -----------------------------------"
4310 "---------------------------------\n");
4311 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
4312 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
4313 nsecs_to_usecs(data->saved_latency),
4314 entries,
4315 total,
4316 buf->cpu,
4317 preempt_model_none() ? "server" :
4318 preempt_model_voluntary() ? "desktop" :
4319 preempt_model_full() ? "preempt" :
4320 preempt_model_rt() ? "preempt_rt" :
4321 "unknown",
4322 /* These are reserved for later use */
4323 0, 0, 0, 0);
4324 #ifdef CONFIG_SMP
4325 seq_printf(m, " #P:%d)\n", num_online_cpus());
4326 #else
4327 seq_puts(m, ")\n");
4328 #endif
4329 seq_puts(m, "# -----------------\n");
4330 seq_printf(m, "# | task: %.16s-%d "
4331 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
4332 data->comm, data->pid,
4333 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
4334 data->policy, data->rt_priority);
4335 seq_puts(m, "# -----------------\n");
4337 if (data->critical_start) {
4338 seq_puts(m, "# => started at: ");
4339 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
4340 trace_print_seq(m, &iter->seq);
4341 seq_puts(m, "\n# => ended at: ");
4342 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
4343 trace_print_seq(m, &iter->seq);
4344 seq_puts(m, "\n#\n");
4350 static void test_cpu_buff_start(struct trace_iterator *iter)
4352 struct trace_seq *s = &iter->seq;
4353 struct trace_array *tr = iter->tr;
4355 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
4356 return;
4358 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
4359 return;
4361 if (cpumask_available(iter->started) &&
4362 cpumask_test_cpu(iter->cpu, iter->started))
4363 return;
4365 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
4366 return;
4368 if (cpumask_available(iter->started))
4369 cpumask_set_cpu(iter->cpu, iter->started);
4371 /* Don't print started cpu buffer for the first entry of the trace */
4372 if (iter->idx > 1)
4373 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
4374 iter->cpu);
4377 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
4379 struct trace_array *tr = iter->tr;
4380 struct trace_seq *s = &iter->seq;
4381 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4382 struct trace_entry *entry;
4383 struct trace_event *event;
4387 test_cpu_buff_start(iter);
4389 event = ftrace_find_event(entry->type);
4391 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4392 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4393 trace_print_lat_context(iter);
4395 trace_print_context(iter);
4398 if (trace_seq_has_overflowed(s))
4399 return TRACE_TYPE_PARTIAL_LINE;
4401 if (event)
4402 return event->funcs->trace(iter, sym_flags, event);
4404 trace_seq_printf(s, "Unknown type %d\n", entry->type);
4406 return trace_handle_return(s);
4409 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
4411 struct trace_array *tr = iter->tr;
4412 struct trace_seq *s = &iter->seq;
4413 struct trace_entry *entry;
4414 struct trace_event *event;
4418 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
4419 trace_seq_printf(s, "%d %d %llu ",
4420 entry->pid, iter->cpu, iter->ts);
4422 if (trace_seq_has_overflowed(s))
4423 return TRACE_TYPE_PARTIAL_LINE;
4425 event = ftrace_find_event(entry->type);
4426 if (event)
4427 return event->funcs->raw(iter, 0, event);
4429 trace_seq_printf(s, "%d ?\n", entry->type);
4431 return trace_handle_return(s);
4434 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4436 struct trace_array *tr = iter->tr;
4437 struct trace_seq *s = &iter->seq;
4438 unsigned char newline = '\n';
4439 struct trace_entry *entry;
4440 struct trace_event *event;
4444 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4445 SEQ_PUT_HEX_FIELD(s, entry->pid);
4446 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4447 SEQ_PUT_HEX_FIELD(s, iter->ts);
4448 if (trace_seq_has_overflowed(s))
4449 return TRACE_TYPE_PARTIAL_LINE;
4452 event = ftrace_find_event(entry->type);
4453 if (event) {
4454 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4455 if (ret != TRACE_TYPE_HANDLED)
4456 return ret;
4459 SEQ_PUT_FIELD(s, newline);
4461 return trace_handle_return(s);
4464 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4466 struct trace_array *tr = iter->tr;
4467 struct trace_seq *s = &iter->seq;
4468 struct trace_entry *entry;
4469 struct trace_event *event;
4473 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4474 SEQ_PUT_FIELD(s, entry->pid);
4475 SEQ_PUT_FIELD(s, iter->cpu);
4476 SEQ_PUT_FIELD(s, iter->ts);
4477 if (trace_seq_has_overflowed(s))
4478 return TRACE_TYPE_PARTIAL_LINE;
4481 event = ftrace_find_event(entry->type);
4482 return event ? event->funcs->binary(iter, 0, event) :
4483 TRACE_TYPE_HANDLED;
4486 int trace_empty(struct trace_iterator *iter)
4488 struct ring_buffer_iter *buf_iter;
4491 /* If we are looking at one CPU buffer, only check that one */
4492 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4493 cpu = iter->cpu_file;
4494 buf_iter = trace_buffer_iter(iter, cpu);
4495 if (buf_iter) {
4496 if (!ring_buffer_iter_empty(buf_iter))
4497 return 0;
4498 } else {
4499 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4500 return 0;
4502 return 1;
4505 for_each_tracing_cpu(cpu) {
4506 buf_iter = trace_buffer_iter(iter, cpu);
4507 if (buf_iter) {
4508 if (!ring_buffer_iter_empty(buf_iter))
4509 return 0;
4510 } else {
4511 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4512 return 0;
4516 return 1;
4519 /* Called with trace_event_read_lock() held. */
4520 enum print_line_t print_trace_line(struct trace_iterator *iter)
4522 struct trace_array *tr = iter->tr;
4523 unsigned long trace_flags = tr->trace_flags;
4524 enum print_line_t ret;
4526 if (iter->lost_events) {
4527 if (iter->lost_events == (unsigned long)-1)
4528 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4531 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4532 iter->cpu, iter->lost_events);
4533 if (trace_seq_has_overflowed(&iter->seq))
4534 return TRACE_TYPE_PARTIAL_LINE;
4537 if (iter->trace && iter->trace->print_line) {
4538 ret = iter->trace->print_line(iter);
4539 if (ret != TRACE_TYPE_UNHANDLED)
4543 if (iter->ent->type == TRACE_BPUTS &&
4544 trace_flags & TRACE_ITER_PRINTK &&
4545 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4546 return trace_print_bputs_msg_only(iter);
4548 if (iter->ent->type == TRACE_BPRINT &&
4549 trace_flags & TRACE_ITER_PRINTK &&
4550 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4551 return trace_print_bprintk_msg_only(iter);
4553 if (iter->ent->type == TRACE_PRINT &&
4554 trace_flags & TRACE_ITER_PRINTK &&
4555 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4556 return trace_print_printk_msg_only(iter);
4558 if (trace_flags & TRACE_ITER_BIN)
4559 return print_bin_fmt(iter);
4561 if (trace_flags & TRACE_ITER_HEX)
4562 return print_hex_fmt(iter);
4564 if (trace_flags & TRACE_ITER_RAW)
4565 return print_raw_fmt(iter);
4567 return print_trace_fmt(iter);
4570 void trace_latency_header(struct seq_file *m)
4572 struct trace_iterator *iter = m->private;
4573 struct trace_array *tr = iter->tr;
4575 /* print nothing if the buffers are empty */
4576 if (trace_empty(iter))
4577 return;
4579 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4580 print_trace_header(m, iter);
4582 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4583 print_lat_help_header(m);
4586 void trace_default_header(struct seq_file *m)
4588 struct trace_iterator *iter = m->private;
4589 struct trace_array *tr = iter->tr;
4590 unsigned long trace_flags = tr->trace_flags;
4592 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4593 return;
4595 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4596 /* print nothing if the buffers are empty */
4597 if (trace_empty(iter))
4598 return;
4599 print_trace_header(m, iter);
4600 if (!(trace_flags & TRACE_ITER_VERBOSE))
4601 print_lat_help_header(m);
4603 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4604 if (trace_flags & TRACE_ITER_IRQ_INFO)
4605 print_func_help_header_irq(iter->array_buffer,
4608 print_func_help_header(iter->array_buffer, m,
4614 static void test_ftrace_alive(struct seq_file *m)
4616 if (!ftrace_is_dead())
4617 return;
4618 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4619 "# MAY BE MISSING FUNCTION EVENTS\n");
4622 #ifdef CONFIG_TRACER_MAX_TRACE
4623 static void show_snapshot_main_help(struct seq_file *m)
4625 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4626 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4627 "# Takes a snapshot of the main buffer.\n"
4628 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4629 "# (Doesn't have to be '2' works with any number that\n"
4630 "# is not a '0' or '1')\n");
4633 static void show_snapshot_percpu_help(struct seq_file *m)
4635 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4636 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4637 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4638 "# Takes a snapshot of the main buffer for this cpu.\n");
4640 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4641 "# Must use main snapshot file to allocate.\n");
4643 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4644 "# (Doesn't have to be '2' works with any number that\n"
4645 "# is not a '0' or '1')\n");
4648 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4650 if (iter->tr->allocated_snapshot)
4651 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4653 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4655 seq_puts(m, "# Snapshot commands:\n");
4656 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4657 show_snapshot_main_help(m);
4659 show_snapshot_percpu_help(m);
4662 /* Should never be called */
4663 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4666 static int s_show(struct seq_file *m, void *v)
4668 struct trace_iterator *iter = v;
4671 if (iter->ent == NULL) {
4673 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4675 test_ftrace_alive(m);
4677 if (iter->snapshot && trace_empty(iter))
4678 print_snapshot_help(m, iter);
4679 else if (iter->trace && iter->trace->print_header)
4680 iter->trace->print_header(m);
4682 trace_default_header(m);
4684 } else if (iter->leftover) {
4686 * If we filled the seq_file buffer earlier, we
4687 * want to just show it now.
4689 ret = trace_print_seq(m, &iter->seq);
4691 /* ret should this time be zero, but you never know */
4692 iter->leftover = ret;
4695 print_trace_line(iter);
4696 ret = trace_print_seq(m, &iter->seq);
4698 * If we overflow the seq_file buffer, then it will
4699 * ask us for this data again at start up.
4701 * ret is 0 if seq_file write succeeded.
4704 iter->leftover = ret;
4711 * Should be used after trace_array_get(), trace_types_lock
4712 * ensures that i_cdev was already initialized.
4714 static inline int tracing_get_cpu(struct inode *inode)
4716 if (inode->i_cdev) /* See trace_create_cpu_file() */
4717 return (long)inode->i_cdev - 1;
4718 return RING_BUFFER_ALL_CPUS;
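/*
 * trace_create_cpu_file() stores cpu + 1 in i_cdev, so per-cpu trace
 * files decode back to their cpu number here, while a NULL i_cdev
 * (the top-level trace file) selects RING_BUFFER_ALL_CPUS.
 */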
4721 static const struct seq_operations tracer_seq_ops = {
4728 static struct trace_iterator *
4729 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4731 struct trace_array *tr = inode->i_private;
4732 struct trace_iterator *iter;
4735 if (tracing_disabled)
4736 return ERR_PTR(-ENODEV);
4738 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4739 if (!iter)
4740 return ERR_PTR(-ENOMEM);
4742 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4744 if (!iter->buffer_iter)
4745 goto release;
4748 * trace_find_next_entry() may need to save off iter->ent.
4749 * It will place it into the iter->temp buffer. As most
4750 * events are less than 128, allocate a buffer of that size.
4751 * If one is greater, then trace_find_next_entry() will
4752 * allocate a new buffer to adjust for the bigger iter->ent.
4753 * It's not critical if it fails to get allocated here.
4755 iter->temp = kmalloc(128, GFP_KERNEL);
4756 if (iter->temp)
4757 iter->temp_size = 128;
4760 * trace_event_printf() may need to modify given format
4761 * string to replace %p with %px so that it shows real address
4762 * instead of hash value. However, that is only for the event
4763 * tracing, other tracer may not need. Defer the allocation
4764 * until it is needed.
4770 * We make a copy of the current tracer to avoid concurrent
4771 * changes on it while we are reading.
4773 mutex_lock(&trace_types_lock);
4774 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4778 *iter->trace = *tr->current_trace;
4780 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4785 #ifdef CONFIG_TRACER_MAX_TRACE
4786 /* Currently only the top directory has a snapshot */
4787 if (tr->current_trace->print_max || snapshot)
4788 iter->array_buffer = &tr->max_buffer;
4791 iter->array_buffer = &tr->array_buffer;
4792 iter->snapshot = snapshot;
4794 iter->cpu_file = tracing_get_cpu(inode);
4795 mutex_init(&iter->mutex);
4797 /* Notify the tracer early; before we stop tracing. */
4798 if (iter->trace->open)
4799 iter->trace->open(iter);
4801 /* Annotate start of buffers if we had overruns */
4802 if (ring_buffer_overruns(iter->array_buffer->buffer))
4803 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4805 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4806 if (trace_clocks[tr->clock_id].in_ns)
4807 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4810 * If pause-on-trace is enabled, then stop the trace while
4811 * dumping, unless this is the "snapshot" file
4813 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4814 tracing_stop_tr(tr);
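	/*
	 * Illustrative usage (shell, mirroring the readme examples in this
	 * file): the behavior above is controlled by the pause-on-trace
	 * option:
	 *
	 *	# echo 1 > options/pause-on-trace
	 *	# cat trace	(writers are stopped while this read runs)
	 */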
4816 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4817 for_each_tracing_cpu(cpu) {
4818 iter->buffer_iter[cpu] =
4819 ring_buffer_read_prepare(iter->array_buffer->buffer,
4822 ring_buffer_read_prepare_sync();
4823 for_each_tracing_cpu(cpu) {
4824 ring_buffer_read_start(iter->buffer_iter[cpu]);
4825 tracing_iter_reset(iter, cpu);
4828 cpu = iter->cpu_file;
4829 iter->buffer_iter[cpu] =
4830 ring_buffer_read_prepare(iter->array_buffer->buffer,
4832 ring_buffer_read_prepare_sync();
4833 ring_buffer_read_start(iter->buffer_iter[cpu]);
4834 tracing_iter_reset(iter, cpu);
4837 mutex_unlock(&trace_types_lock);
4842 mutex_unlock(&trace_types_lock);
4845 kfree(iter->buffer_iter);
4847 seq_release_private(inode, file);
4848 return ERR_PTR(-ENOMEM);
4851 int tracing_open_generic(struct inode *inode, struct file *filp)
4855 ret = tracing_check_open_get_tr(NULL);
4859 filp->private_data = inode->i_private;
4863 bool tracing_is_disabled(void)
4865 return tracing_disabled;
4869 * Open and update trace_array ref count.
4870 * Must have the current trace_array passed to it.
4872 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4874 struct trace_array *tr = inode->i_private;
4877 ret = tracing_check_open_get_tr(tr);
4881 filp->private_data = inode->i_private;
4886 static int tracing_mark_open(struct inode *inode, struct file *filp)
4888 stream_open(inode, filp);
4889 return tracing_open_generic_tr(inode, filp);
4892 static int tracing_release(struct inode *inode, struct file *file)
4894 struct trace_array *tr = inode->i_private;
4895 struct seq_file *m = file->private_data;
4896 struct trace_iterator *iter;
4899 if (!(file->f_mode & FMODE_READ)) {
4900 trace_array_put(tr);
4904 /* Writes do not use seq_file */
4906 mutex_lock(&trace_types_lock);
4908 for_each_tracing_cpu(cpu) {
4909 if (iter->buffer_iter[cpu])
4910 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4913 if (iter->trace && iter->trace->close)
4914 iter->trace->close(iter);
4916 if (!iter->snapshot && tr->stop_count)
4917 /* reenable tracing if it was previously enabled */
4918 tracing_start_tr(tr);
4920 __trace_array_put(tr);
4922 mutex_unlock(&trace_types_lock);
4924 mutex_destroy(&iter->mutex);
4925 free_cpumask_var(iter->started);
4929 kfree(iter->buffer_iter);
4930 seq_release_private(inode, file);
4935 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4937 struct trace_array *tr = inode->i_private;
4939 trace_array_put(tr);
4943 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4945 struct trace_array *tr = inode->i_private;
4947 trace_array_put(tr);
4949 return single_release(inode, file);
4952 static int tracing_open(struct inode *inode, struct file *file)
4954 struct trace_array *tr = inode->i_private;
4955 struct trace_iterator *iter;
4958 ret = tracing_check_open_get_tr(tr);
4962 /* If this file was open for write, then erase contents */
4963 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4964 int cpu = tracing_get_cpu(inode);
4965 struct array_buffer *trace_buf = &tr->array_buffer;
4967 #ifdef CONFIG_TRACER_MAX_TRACE
4968 if (tr->current_trace->print_max)
4969 trace_buf = &tr->max_buffer;
4972 if (cpu == RING_BUFFER_ALL_CPUS)
4973 tracing_reset_online_cpus(trace_buf);
4975 tracing_reset_cpu(trace_buf, cpu);
4978 if (file->f_mode & FMODE_READ) {
4979 iter = __tracing_open(inode, file, false);
4981 ret = PTR_ERR(iter);
4982 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4983 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4987 trace_array_put(tr);
4993 * Some tracers are not suitable for instance buffers.
4994 * A tracer is always available for the global array (toplevel)
4995 * or if it explicitly states that it is.
4998 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
5000 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
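/*
 * Illustrative sketch (not a tracer defined in this file): a tracer opts
 * into instance buffers by setting .allow_instances; the init/reset
 * callbacks named here are hypothetical:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name		 = "example",
 *		.init		 = example_init,
 *		.reset		 = example_reset,
 *		.allow_instances = true,
 *	};
 */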
5003 /* Find the next tracer that this trace array may use */
5004 static struct tracer *
5005 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
5007 while (t && !trace_ok_for_array(t, tr))
5014 t_next(struct seq_file *m, void *v, loff_t *pos)
5016 struct trace_array *tr = m->private;
5017 struct tracer *t = v;
5022 t = get_tracer_for_array(tr, t->next);
5027 static void *t_start(struct seq_file *m, loff_t *pos)
5029 struct trace_array *tr = m->private;
5033 mutex_lock(&trace_types_lock);
5035 t = get_tracer_for_array(tr, trace_types);
5036 for (; t && l < *pos; t = t_next(m, t, &l))
5042 static void t_stop(struct seq_file *m, void *p)
5044 mutex_unlock(&trace_types_lock);
5047 static int t_show(struct seq_file *m, void *v)
5049 struct tracer *t = v;
5054 seq_puts(m, t->name);
5063 static const struct seq_operations show_traces_seq_ops = {
5070 static int show_traces_open(struct inode *inode, struct file *file)
5072 struct trace_array *tr = inode->i_private;
5076 ret = tracing_check_open_get_tr(tr);
5080 ret = seq_open(file, &show_traces_seq_ops);
5082 trace_array_put(tr);
5086 m = file->private_data;
5092 static int show_traces_release(struct inode *inode, struct file *file)
5094 struct trace_array *tr = inode->i_private;
5096 trace_array_put(tr);
5097 return seq_release(inode, file);
5101 tracing_write_stub(struct file *filp, const char __user *ubuf,
5102 size_t count, loff_t *ppos)
5107 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
5111 if (file->f_mode & FMODE_READ)
5112 ret = seq_lseek(file, offset, whence);
5114 file->f_pos = ret = 0;
5119 static const struct file_operations tracing_fops = {
5120 .open = tracing_open,
5122 .write = tracing_write_stub,
5123 .llseek = tracing_lseek,
5124 .release = tracing_release,
5127 static const struct file_operations show_traces_fops = {
5128 .open = show_traces_open,
5130 .llseek = seq_lseek,
5131 .release = show_traces_release,
5135 tracing_cpumask_read(struct file *filp, char __user *ubuf,
5136 size_t count, loff_t *ppos)
5138 struct trace_array *tr = file_inode(filp)->i_private;
5142 len = snprintf(NULL, 0, "%*pb\n",
5143 cpumask_pr_args(tr->tracing_cpumask)) + 1;
5144 mask_str = kmalloc(len, GFP_KERNEL);
5148 len = snprintf(mask_str, len, "%*pb\n",
5149 cpumask_pr_args(tr->tracing_cpumask));
5154 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
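	/*
	 * The snprintf(NULL, 0, ...) call above is a sizing pass: with a
	 * zero-length buffer, snprintf() writes nothing but still returns
	 * the number of characters the output would need. A minimal
	 * stand-alone sketch of the idiom:
	 *
	 *	int len = snprintf(NULL, 0, "%d\n", value) + 1;	// +1 for '\0'
	 *	char *buf = kmalloc(len, GFP_KERNEL);
	 *
	 *	if (buf)
	 *		snprintf(buf, len, "%d\n", value);
	 */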
5162 int tracing_set_cpumask(struct trace_array *tr,
5163 cpumask_var_t tracing_cpumask_new)
5170 local_irq_disable();
5171 arch_spin_lock(&tr->max_lock);
5172 for_each_tracing_cpu(cpu) {
5174 * Increase/decrease the disabled counter if we are
5175 * about to flip a bit in the cpumask:
5177 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5178 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5179 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5180 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
5182 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
5183 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
5184 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
5185 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
5188 arch_spin_unlock(&tr->max_lock);
5191 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
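	/*
	 * Illustrative usage (shell): the mask is written as a hex bitmap
	 * through the tracing_cpumask file, e.g. to trace only CPUs 0-1:
	 *
	 *	# echo 3 > tracing_cpumask
	 *	# cat tracing_cpumask
	 *	3
	 */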
5197 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
5198 size_t count, loff_t *ppos)
5200 struct trace_array *tr = file_inode(filp)->i_private;
5201 cpumask_var_t tracing_cpumask_new;
5204 if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
5207 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
5211 err = tracing_set_cpumask(tr, tracing_cpumask_new);
5215 free_cpumask_var(tracing_cpumask_new);
5220 free_cpumask_var(tracing_cpumask_new);
5225 static const struct file_operations tracing_cpumask_fops = {
5226 .open = tracing_open_generic_tr,
5227 .read = tracing_cpumask_read,
5228 .write = tracing_cpumask_write,
5229 .release = tracing_release_generic_tr,
5230 .llseek = generic_file_llseek,
5233 static int tracing_trace_options_show(struct seq_file *m, void *v)
5235 struct tracer_opt *trace_opts;
5236 struct trace_array *tr = m->private;
5240 mutex_lock(&trace_types_lock);
5241 tracer_flags = tr->current_trace->flags->val;
5242 trace_opts = tr->current_trace->flags->opts;
5244 for (i = 0; trace_options[i]; i++) {
5245 if (tr->trace_flags & (1 << i))
5246 seq_printf(m, "%s\n", trace_options[i]);
5248 seq_printf(m, "no%s\n", trace_options[i]);
5251 for (i = 0; trace_opts[i].name; i++) {
5252 if (tracer_flags & trace_opts[i].bit)
5253 seq_printf(m, "%s\n", trace_opts[i].name);
5255 seq_printf(m, "no%s\n", trace_opts[i].name);
5257 mutex_unlock(&trace_types_lock);
5262 static int __set_tracer_option(struct trace_array *tr,
5263 struct tracer_flags *tracer_flags,
5264 struct tracer_opt *opts, int neg)
5266 struct tracer *trace = tracer_flags->trace;
5269 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
5274 tracer_flags->val &= ~opts->bit;
5276 tracer_flags->val |= opts->bit;
5280 /* Try to assign a tracer specific option */
5281 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
5283 struct tracer *trace = tr->current_trace;
5284 struct tracer_flags *tracer_flags = trace->flags;
5285 struct tracer_opt *opts = NULL;
5288 for (i = 0; tracer_flags->opts[i].name; i++) {
5289 opts = &tracer_flags->opts[i];
5291 if (strcmp(cmp, opts->name) == 0)
5292 return __set_tracer_option(tr, trace->flags, opts, neg);
5298 /* Some tracers require overwrite to stay enabled */
5299 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
5301 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
5307 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
5311 if ((mask == TRACE_ITER_RECORD_TGID) ||
5312 (mask == TRACE_ITER_RECORD_CMD))
5313 lockdep_assert_held(&event_mutex);
5315 /* do nothing if flag is already set */
5316 if (!!(tr->trace_flags & mask) == !!enabled)
5319 /* Give the tracer a chance to approve the change */
5320 if (tr->current_trace->flag_changed)
5321 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
5325 tr->trace_flags |= mask;
5327 tr->trace_flags &= ~mask;
5329 if (mask == TRACE_ITER_RECORD_CMD)
5330 trace_event_enable_cmd_record(enabled);
5332 if (mask == TRACE_ITER_RECORD_TGID) {
5334 tgid_map_max = pid_max;
5335 map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
5339 * Pairs with smp_load_acquire() in
5340 * trace_find_tgid_ptr() to ensure that if it observes
5341 * the tgid_map we just allocated then it also observes
5342 * the corresponding tgid_map_max value.
5344 smp_store_release(&tgid_map, map);
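			/*
			 * The matching acquire side (a sketch of
			 * trace_find_tgid_ptr()): readers must observe
			 * tgid_map_max only after observing the tgid_map
			 * pointer published above:
			 *
			 *	int *map = smp_load_acquire(&tgid_map);
			 *
			 *	if (unlikely(!map || pid > tgid_map_max))
			 *		return NULL;
			 *	return &map[pid];
			 */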
5347 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
5351 trace_event_enable_tgid_record(enabled);
5354 if (mask == TRACE_ITER_EVENT_FORK)
5355 trace_event_follow_fork(tr, enabled);
5357 if (mask == TRACE_ITER_FUNC_FORK)
5358 ftrace_pid_follow_fork(tr, enabled);
5360 if (mask == TRACE_ITER_OVERWRITE) {
5361 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
5362 #ifdef CONFIG_TRACER_MAX_TRACE
5363 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
5367 if (mask == TRACE_ITER_PRINTK) {
5368 trace_printk_start_stop_comm(enabled);
5369 trace_printk_control(enabled);
5375 int trace_set_options(struct trace_array *tr, char *option)
5380 size_t orig_len = strlen(option);
5383 cmp = strstrip(option);
5385 len = str_has_prefix(cmp, "no");
5391 mutex_lock(&event_mutex);
5392 mutex_lock(&trace_types_lock);
5394 ret = match_string(trace_options, -1, cmp);
5395 /* If no option could be set, test the specific tracer options */
5397 ret = set_tracer_option(tr, cmp, neg);
5399 ret = set_tracer_flag(tr, 1 << ret, !neg);
5401 mutex_unlock(&trace_types_lock);
5402 mutex_unlock(&event_mutex);
5405 * If the first trailing whitespace is replaced with '\0' by strstrip,
5406 * turn it back into a space.
5408 if (orig_len > strlen(option))
5409 option[strlen(option)] = ' ';
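/*
 * Illustrative usage (shell): options are toggled by writing their name,
 * prefixed with "no" to clear them:
 *
 *	# echo stacktrace > trace_options
 *	# echo nostacktrace > trace_options
 */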
5414 static void __init apply_trace_boot_options(void)
5416 char *buf = trace_boot_options_buf;
5420 option = strsep(&buf, ",");
5426 trace_set_options(&global_trace, option);
5428 /* Put back the comma to allow this to be called again */
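	/*
	 * Example (illustrative): booting with
	 *
	 *	trace_options=sym-addr,stacktrace
	 *
	 * arrives here as one comma-separated string; strsep() splits it
	 * and each token is fed to trace_set_options() in turn.
	 */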
5435 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
5436 size_t cnt, loff_t *ppos)
5438 struct seq_file *m = filp->private_data;
5439 struct trace_array *tr = m->private;
5443 if (cnt >= sizeof(buf))
5446 if (copy_from_user(buf, ubuf, cnt))
5451 ret = trace_set_options(tr, buf);
5460 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5462 struct trace_array *tr = inode->i_private;
5465 ret = tracing_check_open_get_tr(tr);
5469 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5471 trace_array_put(tr);
5476 static const struct file_operations tracing_iter_fops = {
5477 .open = tracing_trace_options_open,
5479 .llseek = seq_lseek,
5480 .release = tracing_single_release_tr,
5481 .write = tracing_trace_options_write,
5484 static const char readme_msg[] =
5485 "tracing mini-HOWTO:\n\n"
5486 "# echo 0 > tracing_on : quick way to disable tracing\n"
5487 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5488 " Important files:\n"
5489 " trace\t\t\t- The static contents of the buffer\n"
5490 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5491 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5492 " current_tracer\t- function and latency tracers\n"
5493 " available_tracers\t- list of configured tracers for current_tracer\n"
5494 " error_log\t- error log for failed commands (that support it)\n"
5495 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5496 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5497 " trace_clock\t\t- change the clock used to order events\n"
5498 " local: Per cpu clock but may not be synced across CPUs\n"
5499 " global: Synced across CPUs but slows tracing down.\n"
5500 " counter: Not a clock, but just an increment\n"
5501 " uptime: Jiffy counter from time of boot\n"
5502 " perf: Same clock that perf events use\n"
5503 #ifdef CONFIG_X86_64
5504 " x86-tsc: TSC cycle counter\n"
5506 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5507 " delta: Delta difference against a buffer-wide timestamp\n"
5508 " absolute: Absolute (standalone) timestamp\n"
5509 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
5510 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
5511 " tracing_cpumask\t- Limit which CPUs to trace\n"
5512 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5513 "\t\t\t Remove sub-buffer with rmdir\n"
5514 " trace_options\t\t- Set format or modify how tracing happens\n"
5515 "\t\t\t Disable an option by prefixing 'no' to the\n"
5516 "\t\t\t option name\n"
5517 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
5518 #ifdef CONFIG_DYNAMIC_FTRACE
5519 "\n available_filter_functions - list of functions that can be filtered on\n"
5520 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5521 "\t\t\t functions\n"
5522 "\t accepts: func_full_name or glob-matching-pattern\n"
5523 "\t modules: Can select a group via module\n"
5524 "\t Format: :mod:<module-name>\n"
5525 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5526 "\t triggers: a command to perform when function is hit\n"
5527 "\t Format: <function>:<trigger>[:count]\n"
5528 "\t trigger: traceon, traceoff\n"
5529 "\t\t enable_event:<system>:<event>\n"
5530 "\t\t disable_event:<system>:<event>\n"
5531 #ifdef CONFIG_STACKTRACE
5534 #ifdef CONFIG_TRACER_SNAPSHOT
5539 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5540 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5541 "\t The first one will disable tracing every time do_fault is hit\n"
5542 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5543 "\t The first time do trap is hit and it disables tracing, the\n"
5544 "\t counter will decrement to 2. If tracing is already disabled,\n"
5545 "\t the counter will not decrement. It only decrements when the\n"
5546 "\t trigger did work\n"
5547 "\t To remove trigger without count:\n"
5548 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
5549 "\t To remove trigger with a count:\n"
5550 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
5551 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5552 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5553 "\t modules: Can select a group via module command :mod:\n"
5554 "\t Does not accept triggers\n"
5555 #endif /* CONFIG_DYNAMIC_FTRACE */
5556 #ifdef CONFIG_FUNCTION_TRACER
5557 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5559 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5562 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5563 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5564 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5565 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5567 #ifdef CONFIG_TRACER_SNAPSHOT
5568 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5569 "\t\t\t snapshot buffer. Read the contents for more\n"
5570 "\t\t\t information\n"
5572 #ifdef CONFIG_STACK_TRACER
5573 " stack_trace\t\t- Shows the max stack trace when active\n"
5574 " stack_max_size\t- Shows current max stack size that was traced\n"
5575 "\t\t\t Write into this file to reset the max size (trigger a\n"
5576 "\t\t\t new trace)\n"
5577 #ifdef CONFIG_DYNAMIC_FTRACE
5578 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5581 #endif /* CONFIG_STACK_TRACER */
5582 #ifdef CONFIG_DYNAMIC_EVENTS
5583 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5584 "\t\t\t Write into this file to define/undefine new trace events.\n"
5586 #ifdef CONFIG_KPROBE_EVENTS
5587 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5588 "\t\t\t Write into this file to define/undefine new trace events.\n"
5590 #ifdef CONFIG_UPROBE_EVENTS
5591 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5592 "\t\t\t Write into this file to define/undefine new trace events.\n"
5594 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5595 "\t accepts: event-definitions (one definition per line)\n"
5596 "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n"
5597 "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n"
5598 #ifdef CONFIG_HIST_TRIGGERS
5599 "\t s:[synthetic/]<event> <field> [<field>]\n"
5601 "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>]\n"
5602 "\t -:[<group>/][<event>]\n"
5603 #ifdef CONFIG_KPROBE_EVENTS
5604 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5605 "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5607 #ifdef CONFIG_UPROBE_EVENTS
5608 " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5610 "\t args: <name>=fetcharg[:type]\n"
5611 "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
5612 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5613 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5615 "\t $stack<index>, $stack, $retval, $comm,\n"
5617 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5618 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5619 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5620 "\t symstr, <type>\\[<array-size>\\]\n"
5621 #ifdef CONFIG_HIST_TRIGGERS
5622 "\t field: <stype> <name>;\n"
5623 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5624 "\t [unsigned] char/int/long\n"
5626 "\t efield: For event probes ('e' types), the field is on of the fields\n"
5627 "\t of the <attached-group>/<attached-event>.\n"
5629 " events/\t\t- Directory containing all trace event subsystems:\n"
5630 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5631 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5632 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5634 " filter\t\t- If set, only events passing filter are traced\n"
5635 " events/<system>/<event>/\t- Directory containing control files for\n"
5637 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5638 " filter\t\t- If set, only events passing filter are traced\n"
5639 " trigger\t\t- If set, a command to perform when event is hit\n"
5640 "\t Format: <trigger>[:count][if <filter>]\n"
5641 "\t trigger: traceon, traceoff\n"
5642 "\t enable_event:<system>:<event>\n"
5643 "\t disable_event:<system>:<event>\n"
5644 #ifdef CONFIG_HIST_TRIGGERS
5645 "\t enable_hist:<system>:<event>\n"
5646 "\t disable_hist:<system>:<event>\n"
5648 #ifdef CONFIG_STACKTRACE
5651 #ifdef CONFIG_TRACER_SNAPSHOT
5654 #ifdef CONFIG_HIST_TRIGGERS
5655 "\t\t hist (see below)\n"
5657 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5658 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5659 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5660 "\t events/block/block_unplug/trigger\n"
5661 "\t The first disables tracing every time block_unplug is hit.\n"
5662 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5663 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5664 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
5665 "\t Like function triggers, the counter is only decremented if it\n"
5666 "\t enabled or disabled tracing.\n"
5667 "\t To remove a trigger without a count:\n"
5668 "\t echo '!<trigger> > <system>/<event>/trigger\n"
5669 "\t To remove a trigger with a count:\n"
5670 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
5671 "\t Filters can be ignored when removing a trigger.\n"
5672 #ifdef CONFIG_HIST_TRIGGERS
5673 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5674 "\t Format: hist:keys=<field1[,field2,...]>\n"
5675 "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n"
5676 "\t [:values=<field1[,field2,...]>]\n"
5677 "\t [:sort=<field1[,field2,...]>]\n"
5678 "\t [:size=#entries]\n"
5679 "\t [:pause][:continue][:clear]\n"
5680 "\t [:name=histname1]\n"
5681 "\t [:nohitcount]\n"
5682 "\t [:<handler>.<action>]\n"
5683 "\t [if <filter>]\n\n"
5684 "\t Note, special fields can be used as well:\n"
5685 "\t common_timestamp - to record current timestamp\n"
5686 "\t common_cpu - to record the CPU the event happened on\n"
5688 "\t A hist trigger variable can be:\n"
5689 "\t - a reference to a field e.g. x=current_timestamp,\n"
5690 "\t - a reference to another variable e.g. y=$x,\n"
5691 "\t - a numeric literal: e.g. ms_per_sec=1000,\n"
5692 "\t - an arithmetic expression: e.g. time_secs=current_timestamp/1000\n"
5694 "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n"
5695 "\t multiplication(*) and division(/) operators. An operand can be either a\n"
5696 "\t variable reference, field or numeric literal.\n"
5698 "\t When a matching event is hit, an entry is added to a hash\n"
5699 "\t table using the key(s) and value(s) named, and the value of a\n"
5700 "\t sum called 'hitcount' is incremented. Keys and values\n"
5701 "\t correspond to fields in the event's format description. Keys\n"
5702 "\t can be any field, or the special string 'stacktrace'.\n"
5703 "\t Compound keys consisting of up to two fields can be specified\n"
5704 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5705 "\t fields. Sort keys consisting of up to two fields can be\n"
5706 "\t specified using the 'sort' keyword. The sort direction can\n"
5707 "\t be modified by appending '.descending' or '.ascending' to a\n"
5708 "\t sort field. The 'size' parameter can be used to specify more\n"
5709 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5710 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5711 "\t its histogram data will be shared with other triggers of the\n"
5712 "\t same name, and trigger hits will update this common data.\n\n"
5713 "\t Reading the 'hist' file for the event will dump the hash\n"
5714 "\t table in its entirety to stdout. If there are multiple hist\n"
5715 "\t triggers attached to an event, there will be a table for each\n"
5716 "\t trigger in the output. The table displayed for a named\n"
5717 "\t trigger will be the same as any other instance having the\n"
5718 "\t same name. The default format used to display a given field\n"
5719 "\t can be modified by appending any of the following modifiers\n"
5720 "\t to the field name, as applicable:\n\n"
5721 "\t .hex display a number as a hex value\n"
5722 "\t .sym display an address as a symbol\n"
5723 "\t .sym-offset display an address as a symbol and offset\n"
5724 "\t .execname display a common_pid as a program name\n"
5725 "\t .syscall display a syscall id as a syscall name\n"
5726 "\t .log2 display log2 value rather than raw number\n"
5727 "\t .buckets=size display values in groups of size rather than raw number\n"
5728 "\t .usecs display a common_timestamp in microseconds\n"
5729 "\t .percent display a number of percentage value\n"
5730 "\t .graph display a bar-graph of a value\n\n"
5731 "\t The 'pause' parameter can be used to pause an existing hist\n"
5732 "\t trigger or to start a hist trigger but not log any events\n"
5733 "\t until told to do so. 'continue' can be used to start or\n"
5734 "\t restart a paused hist trigger.\n\n"
5735 "\t The 'clear' parameter will clear the contents of a running\n"
5736 "\t hist trigger and leave its current paused/active state\n"
5738 "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n"
5739 "\t raw hitcount in the histogram.\n\n"
5740 "\t The enable_hist and disable_hist triggers can be used to\n"
5741 "\t have one event conditionally start and stop another event's\n"
5742 "\t already-attached hist trigger. The syntax is analogous to\n"
5743 "\t the enable_event and disable_event triggers.\n\n"
5744 "\t Hist trigger handlers and actions are executed whenever a\n"
5745 "\t a histogram entry is added or updated. They take the form:\n\n"
5746 "\t <handler>.<action>\n\n"
5747 "\t The available handlers are:\n\n"
5748 "\t onmatch(matching.event) - invoke on addition or update\n"
5749 "\t onmax(var) - invoke if var exceeds current max\n"
5750 "\t onchange(var) - invoke action if var changes\n\n"
5751 "\t The available actions are:\n\n"
5752 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5753 "\t save(field,...) - save current event fields\n"
5754 #ifdef CONFIG_TRACER_SNAPSHOT
5755 "\t snapshot() - snapshot the trace buffer\n\n"
5757 #ifdef CONFIG_SYNTH_EVENTS
5758 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5759 "\t Write into this file to define/undefine new synthetic events.\n"
5760 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5766 tracing_readme_read(struct file *filp, char __user *ubuf,
5767 size_t cnt, loff_t *ppos)
5769 return simple_read_from_buffer(ubuf, cnt, ppos,
5770 readme_msg, strlen(readme_msg));
5773 static const struct file_operations tracing_readme_fops = {
5774 .open = tracing_open_generic,
5775 .read = tracing_readme_read,
5776 .llseek = generic_file_llseek,
5779 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5783 return trace_find_tgid_ptr(pid);
5786 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5790 return trace_find_tgid_ptr(pid);
5793 static void saved_tgids_stop(struct seq_file *m, void *v)
5797 static int saved_tgids_show(struct seq_file *m, void *v)
5799 int *entry = (int *)v;
5800 int pid = entry - tgid_map;
5806 seq_printf(m, "%d %d\n", pid, tgid);
5810 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5811 .start = saved_tgids_start,
5812 .stop = saved_tgids_stop,
5813 .next = saved_tgids_next,
5814 .show = saved_tgids_show,
5817 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5821 ret = tracing_check_open_get_tr(NULL);
5825 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5829 static const struct file_operations tracing_saved_tgids_fops = {
5830 .open = tracing_saved_tgids_open,
5832 .llseek = seq_lseek,
5833 .release = seq_release,
5836 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5838 unsigned int *ptr = v;
5840 if (*pos || m->count)
5845 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5847 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5856 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5862 arch_spin_lock(&trace_cmdline_lock);
5864 v = &savedcmd->map_cmdline_to_pid[0];
5866 v = saved_cmdlines_next(m, v, &l);
5874 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5876 arch_spin_unlock(&trace_cmdline_lock);
5880 static int saved_cmdlines_show(struct seq_file *m, void *v)
5882 char buf[TASK_COMM_LEN];
5883 unsigned int *pid = v;
5885 __trace_find_cmdline(*pid, buf);
5886 seq_printf(m, "%d %s\n", *pid, buf);
5890 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5891 .start = saved_cmdlines_start,
5892 .next = saved_cmdlines_next,
5893 .stop = saved_cmdlines_stop,
5894 .show = saved_cmdlines_show,
5897 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5901 ret = tracing_check_open_get_tr(NULL);
5905 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5908 static const struct file_operations tracing_saved_cmdlines_fops = {
5909 .open = tracing_saved_cmdlines_open,
5911 .llseek = seq_lseek,
5912 .release = seq_release,
5916 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5917 size_t cnt, loff_t *ppos)
5923 arch_spin_lock(&trace_cmdline_lock);
5924 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5925 arch_spin_unlock(&trace_cmdline_lock);
5928 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5931 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5933 kfree(s->saved_cmdlines);
5934 kfree(s->map_cmdline_to_pid);
5938 static int tracing_resize_saved_cmdlines(unsigned int val)
5940 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5942 s = kmalloc(sizeof(*s), GFP_KERNEL);
5946 if (allocate_cmdlines_buffer(val, s) < 0) {
5952 arch_spin_lock(&trace_cmdline_lock);
5953 savedcmd_temp = savedcmd;
5955 arch_spin_unlock(&trace_cmdline_lock);
5957 free_saved_cmdlines_buffer(savedcmd_temp);
5963 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5964 size_t cnt, loff_t *ppos)
5969 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5973 /* must have at least 1 entry and no more than PID_MAX_DEFAULT */
5974 if (!val || val > PID_MAX_DEFAULT)
5977 ret = tracing_resize_saved_cmdlines((unsigned int)val);
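	/*
	 * Illustrative usage (shell): growing the cached comm list so that
	 * more task names survive in the trace output (128 is the usual
	 * default):
	 *
	 *	# cat saved_cmdlines_size
	 *	128
	 *	# echo 1024 > saved_cmdlines_size
	 */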
5986 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5987 .open = tracing_open_generic,
5988 .read = tracing_saved_cmdlines_size_read,
5989 .write = tracing_saved_cmdlines_size_write,
5992 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5993 static union trace_eval_map_item *
5994 update_eval_map(union trace_eval_map_item *ptr)
5996 if (!ptr->map.eval_string) {
5997 if (ptr->tail.next) {
5998 ptr = ptr->tail.next;
5999 /* Set ptr to the next real item (skip head) */
6007 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
6009 union trace_eval_map_item *ptr = v;
6012 * Paranoid! If ptr points to end, we don't want to increment past it.
6013 * This really should never happen.
6016 ptr = update_eval_map(ptr);
6017 if (WARN_ON_ONCE(!ptr))
6021 ptr = update_eval_map(ptr);
6026 static void *eval_map_start(struct seq_file *m, loff_t *pos)
6028 union trace_eval_map_item *v;
6031 mutex_lock(&trace_eval_mutex);
6033 v = trace_eval_maps;
6037 while (v && l < *pos) {
6038 v = eval_map_next(m, v, &l);
6044 static void eval_map_stop(struct seq_file *m, void *v)
6046 mutex_unlock(&trace_eval_mutex);
6049 static int eval_map_show(struct seq_file *m, void *v)
6051 union trace_eval_map_item *ptr = v;
6053 seq_printf(m, "%s %ld (%s)\n",
6054 ptr->map.eval_string, ptr->map.eval_value,
6060 static const struct seq_operations tracing_eval_map_seq_ops = {
6061 .start = eval_map_start,
6062 .next = eval_map_next,
6063 .stop = eval_map_stop,
6064 .show = eval_map_show,
6067 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
6071 ret = tracing_check_open_get_tr(NULL);
6075 return seq_open(filp, &tracing_eval_map_seq_ops);
6078 static const struct file_operations tracing_eval_map_fops = {
6079 .open = tracing_eval_map_open,
6081 .llseek = seq_lseek,
6082 .release = seq_release,
6085 static inline union trace_eval_map_item *
6086 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
6088 /* Return tail of array given the head */
6089 return ptr + ptr->head.length + 1;
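/*
 * Layout sketch of one chunk of the trace_eval_maps list that the helper
 * above skips over (a head item, head.length map entries, then a tail
 * item whose tail.next links to the following chunk):
 *
 *	[head: mod, length] [map 0] ... [map length-1] [tail: next] --> next chunk
 */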
6093 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
6096 struct trace_eval_map **stop;
6097 struct trace_eval_map **map;
6098 union trace_eval_map_item *map_array;
6099 union trace_eval_map_item *ptr;
6104 * The trace_eval_maps contains the map plus a head and tail item,
6105 * where the head holds the module and length of array, and the
6106 * tail holds a pointer to the next list.
6108 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
6110 pr_warn("Unable to allocate trace eval mapping\n");
6114 mutex_lock(&trace_eval_mutex);
6116 if (!trace_eval_maps)
6117 trace_eval_maps = map_array;
6119 ptr = trace_eval_maps;
6121 ptr = trace_eval_jmp_to_tail(ptr);
6122 if (!ptr->tail.next)
6124 ptr = ptr->tail.next;
6127 ptr->tail.next = map_array;
6129 map_array->head.mod = mod;
6130 map_array->head.length = len;
6133 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
6134 map_array->map = **map;
6137 memset(map_array, 0, sizeof(*map_array));
6139 mutex_unlock(&trace_eval_mutex);
6142 static void trace_create_eval_file(struct dentry *d_tracer)
6144 trace_create_file("eval_map", TRACE_MODE_READ, d_tracer,
6145 NULL, &tracing_eval_map_fops);
6148 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
6149 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
6150 static inline void trace_insert_eval_map_file(struct module *mod,
6151 struct trace_eval_map **start, int len) { }
6152 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
6154 static void trace_insert_eval_map(struct module *mod,
6155 struct trace_eval_map **start, int len)
6157 struct trace_eval_map **map;
6164 trace_event_eval_update(map, len);
6166 trace_insert_eval_map_file(mod, start, len);
6170 tracing_set_trace_read(struct file *filp, char __user *ubuf,
6171 size_t cnt, loff_t *ppos)
6173 struct trace_array *tr = filp->private_data;
6174 char buf[MAX_TRACER_SIZE+2];
6177 mutex_lock(&trace_types_lock);
6178 r = sprintf(buf, "%s\n", tr->current_trace->name);
6179 mutex_unlock(&trace_types_lock);
6181 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6184 int tracer_init(struct tracer *t, struct trace_array *tr)
6186 tracing_reset_online_cpus(&tr->array_buffer);
6190 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
6194 for_each_tracing_cpu(cpu)
6195 per_cpu_ptr(buf->data, cpu)->entries = val;
6198 #ifdef CONFIG_TRACER_MAX_TRACE
6199 /* resize @tr's buffer to the size of @size_tr's entries */
6200 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
6201 struct array_buffer *size_buf, int cpu_id)
6205 if (cpu_id == RING_BUFFER_ALL_CPUS) {
6206 for_each_tracing_cpu(cpu) {
6207 ret = ring_buffer_resize(trace_buf->buffer,
6208 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
6211 per_cpu_ptr(trace_buf->data, cpu)->entries =
6212 per_cpu_ptr(size_buf->data, cpu)->entries;
6215 ret = ring_buffer_resize(trace_buf->buffer,
6216 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
6218 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
6219 per_cpu_ptr(size_buf->data, cpu_id)->entries;
6224 #endif /* CONFIG_TRACER_MAX_TRACE */
6226 static int __tracing_resize_ring_buffer(struct trace_array *tr,
6227 unsigned long size, int cpu)
6232 * If kernel or user changes the size of the ring buffer
6233 * we use the size that was given, and we can forget about
6234 * expanding it later.
6236 ring_buffer_expanded = true;
6238 /* May be called before buffers are initialized */
6239 if (!tr->array_buffer.buffer)
6242 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
6246 #ifdef CONFIG_TRACER_MAX_TRACE
6247 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
6248 !tr->current_trace->use_max_tr)
6251 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
6253 int r = resize_buffer_duplicate_size(&tr->array_buffer,
6254 &tr->array_buffer, cpu);
6257 * AARGH! We are left with a different-sized
6258 * max buffer!!!!
6259 * The max buffer is our "snapshot" buffer.
6260 * When a tracer needs a snapshot (one of the
6261 * latency tracers), it swaps the max buffer
6262 * with the saved snapshot. We succeeded in
6263 * updating the size of the main buffer, but failed to
6264 * update the size of the max buffer. And when we tried
6265 * to reset the main buffer to the original size, we
6266 * failed there too. This is very unlikely to
6267 * happen, but if it does, warn and kill all
6271 tracing_disabled = 1;
6276 if (cpu == RING_BUFFER_ALL_CPUS)
6277 set_buffer_entries(&tr->max_buffer, size);
6279 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
6282 #endif /* CONFIG_TRACER_MAX_TRACE */
6284 if (cpu == RING_BUFFER_ALL_CPUS)
6285 set_buffer_entries(&tr->array_buffer, size);
6287 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
6292 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
6293 unsigned long size, int cpu_id)
6297 mutex_lock(&trace_types_lock);
6299 if (cpu_id != RING_BUFFER_ALL_CPUS) {
6300 /* make sure, this cpu is enabled in the mask */
6301 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
6307 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
6312 mutex_unlock(&trace_types_lock);
6319 * tracing_update_buffers - used by tracing facility to expand ring buffers
6321 * To save memory when tracing is never used on a system that has it
6322 * configured in, the ring buffers are set to a minimum size. But once
6323 * a user starts to use the tracing facility, they need to grow
6324 * to their default size.
6326 * This function is to be called when a tracer is about to be used.
6328 int tracing_update_buffers(void)
6332 mutex_lock(&trace_types_lock);
6333 if (!ring_buffer_expanded)
6334 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
6335 RING_BUFFER_ALL_CPUS);
6336 mutex_unlock(&trace_types_lock);
6341 struct trace_option_dentry;
6344 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
6347 * Used to clear out the tracer before deletion of an instance.
6348 * Must have trace_types_lock held.
6350 static void tracing_set_nop(struct trace_array *tr)
6352 if (tr->current_trace == &nop_trace)
6355 tr->current_trace->enabled--;
6357 if (tr->current_trace->reset)
6358 tr->current_trace->reset(tr);
6360 tr->current_trace = &nop_trace;
6363 static bool tracer_options_updated;
6365 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
6367 /* Only enable if the directory has been created already. */
6371 /* Only create trace option files after update_tracer_options finish */
6372 if (!tracer_options_updated)
6375 create_trace_option_files(tr, t);
6378 int tracing_set_tracer(struct trace_array *tr, const char *buf)
6381 #ifdef CONFIG_TRACER_MAX_TRACE
6386 mutex_lock(&trace_types_lock);
6388 if (!ring_buffer_expanded) {
6389 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
6390 RING_BUFFER_ALL_CPUS);
6396 for (t = trace_types; t; t = t->next) {
6397 if (strcmp(t->name, buf) == 0)
6404 if (t == tr->current_trace)
6407 #ifdef CONFIG_TRACER_SNAPSHOT
6408 if (t->use_max_tr) {
6409 local_irq_disable();
6410 arch_spin_lock(&tr->max_lock);
6411 if (tr->cond_snapshot)
6413 arch_spin_unlock(&tr->max_lock);
6419 /* Some tracers won't work on kernel command line */
6420 if (system_state < SYSTEM_RUNNING && t->noboot) {
6421 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
6426 /* Some tracers are only allowed for the top level buffer */
6427 if (!trace_ok_for_array(t, tr)) {
6432 /* If trace pipe files are being read, we can't change the tracer */
6433 if (tr->trace_ref) {
6438 trace_branch_disable();
6440 tr->current_trace->enabled--;
6442 if (tr->current_trace->reset)
6443 tr->current_trace->reset(tr);
6445 #ifdef CONFIG_TRACER_MAX_TRACE
6446 had_max_tr = tr->current_trace->use_max_tr;
6448 /* Current trace needs to be nop_trace before synchronize_rcu */
6449 tr->current_trace = &nop_trace;
6451 if (had_max_tr && !t->use_max_tr) {
6453 * We need to make sure that the update_max_tr sees that
6454 * current_trace changed to nop_trace to keep it from
6455 * swapping the buffers after we resize it.
6456 * The update_max_tr is called with interrupts disabled,
6457 * so a synchronize_rcu() is sufficient.
6463 if (t->use_max_tr && !tr->allocated_snapshot) {
6464 ret = tracing_alloc_snapshot_instance(tr);
6469 tr->current_trace = &nop_trace;
6473 ret = tracer_init(t, tr);
6478 tr->current_trace = t;
6479 tr->current_trace->enabled++;
6480 trace_branch_enable(tr);
6482 mutex_unlock(&trace_types_lock);
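/*
 * Illustrative usage (shell): this is the handler behind the
 * current_tracer file; the listing depends on kernel config:
 *
 *	# cat available_tracers
 *	function_graph function nop
 *	# echo function > current_tracer
 *	# echo nop > current_tracer
 */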
6488 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6489 size_t cnt, loff_t *ppos)
6491 struct trace_array *tr = filp->private_data;
6492 char buf[MAX_TRACER_SIZE+1];
6499 if (cnt > MAX_TRACER_SIZE)
6500 cnt = MAX_TRACER_SIZE;
6502 if (copy_from_user(buf, ubuf, cnt))
6509 err = tracing_set_tracer(tr, name);
6519 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6520 size_t cnt, loff_t *ppos)
6525 r = snprintf(buf, sizeof(buf), "%ld\n",
6526 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6527 if (r > sizeof(buf))
6529 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6533 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6534 size_t cnt, loff_t *ppos)
6539 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6549 tracing_thresh_read(struct file *filp, char __user *ubuf,
6550 size_t cnt, loff_t *ppos)
6552 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6556 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6557 size_t cnt, loff_t *ppos)
6559 struct trace_array *tr = filp->private_data;
6562 mutex_lock(&trace_types_lock);
6563 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6567 if (tr->current_trace->update_thresh) {
6568 ret = tr->current_trace->update_thresh(tr);
6575 mutex_unlock(&trace_types_lock);
6580 #ifdef CONFIG_TRACER_MAX_TRACE
6583 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6584 size_t cnt, loff_t *ppos)
6586 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6590 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6591 size_t cnt, loff_t *ppos)
6593 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6598 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6600 struct trace_array *tr = inode->i_private;
6601 struct trace_iterator *iter;
6604 ret = tracing_check_open_get_tr(tr);
6608 mutex_lock(&trace_types_lock);
6610 /* create a buffer to store the information to pass to userspace */
6611 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6614 __trace_array_put(tr);
6618 trace_seq_init(&iter->seq);
6619 iter->trace = tr->current_trace;
6621 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6626 /* trace pipe does not show start of buffer */
6627 cpumask_setall(iter->started);
6629 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6630 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6632 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6633 if (trace_clocks[tr->clock_id].in_ns)
6634 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6637 iter->array_buffer = &tr->array_buffer;
6638 iter->cpu_file = tracing_get_cpu(inode);
6639 mutex_init(&iter->mutex);
6640 filp->private_data = iter;
6642 if (iter->trace->pipe_open)
6643 iter->trace->pipe_open(iter);
6645 nonseekable_open(inode, filp);
6649 mutex_unlock(&trace_types_lock);
6654 __trace_array_put(tr);
6655 mutex_unlock(&trace_types_lock);
6659 static int tracing_release_pipe(struct inode *inode, struct file *file)
6661 struct trace_iterator *iter = file->private_data;
6662 struct trace_array *tr = inode->i_private;
6664 mutex_lock(&trace_types_lock);
6668 if (iter->trace->pipe_close)
6669 iter->trace->pipe_close(iter);
6671 mutex_unlock(&trace_types_lock);
6673 free_cpumask_var(iter->started);
6675 mutex_destroy(&iter->mutex);
6678 trace_array_put(tr);
6684 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6686 struct trace_array *tr = iter->tr;
6688 /* Iterators are static; they should be either filled or empty */
6689 if (trace_buffer_iter(iter, iter->cpu_file))
6690 return EPOLLIN | EPOLLRDNORM;
6692 if (tr->trace_flags & TRACE_ITER_BLOCK)
6694 * Always select as readable when in blocking mode
6696 return EPOLLIN | EPOLLRDNORM;
6698 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6699 filp, poll_table, iter->tr->buffer_percent);
6703 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6705 struct trace_iterator *iter = filp->private_data;
6707 return trace_poll(iter, filp, poll_table);
6710 /* Must be called with iter->mutex held. */
6711 static int tracing_wait_pipe(struct file *filp)
6713 struct trace_iterator *iter = filp->private_data;
6716 while (trace_empty(iter)) {
6718 if ((filp->f_flags & O_NONBLOCK)) {
6723 * We block until we read something and tracing is disabled.
6724 * We still block if tracing is disabled but we have never
6725 * read anything. This allows a user to cat this file, and
6726 * then enable tracing. But after we have read something,
6727 * we give an EOF when tracing is again disabled.
6729 * iter->pos will be 0 if we haven't read anything.
6731 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6734 mutex_unlock(&iter->mutex);
6736 ret = wait_on_pipe(iter, 0);
6738 mutex_lock(&iter->mutex);
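/*
 * A minimal user-space consumer matching the semantics above (a sketch;
 * error handling trimmed, path assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *	int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	// read() blocks while the buffer is empty; it returns 0 (EOF)
 *	// only once something was read and tracing is disabled again.
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 */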
6751 tracing_read_pipe(struct file *filp, char __user *ubuf,
6752 size_t cnt, loff_t *ppos)
6754 struct trace_iterator *iter = filp->private_data;
6758 * Avoid more than one consumer on a single file descriptor.
6759 * This is just a matter of trace coherency; the ring buffer itself
6762 mutex_lock(&iter->mutex);
6764 /* return any leftover data */
6765 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6769 trace_seq_init(&iter->seq);
6771 if (iter->trace->read) {
6772 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6778 sret = tracing_wait_pipe(filp);
6782 /* stop when tracing is finished */
6783 if (trace_empty(iter)) {
6788 if (cnt >= PAGE_SIZE)
6789 cnt = PAGE_SIZE - 1;
6791 /* reset all but tr, trace, and overruns */
6792 trace_iterator_reset(iter);
6793 cpumask_clear(iter->started);
6794 trace_seq_init(&iter->seq);
6796 trace_event_read_lock();
6797 trace_access_lock(iter->cpu_file);
6798 while (trace_find_next_entry_inc(iter) != NULL) {
6799 enum print_line_t ret;
6800 int save_len = iter->seq.seq.len;
6802 ret = print_trace_line(iter);
6803 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6805 * If one print_trace_line() fills the entire trace_seq in one shot,
6806 * trace_seq_to_user() will return -EBUSY because save_len == 0.
6807 * In this case, we need to consume it; otherwise, the loop will peek
6808 * at this event next time, resulting in an infinite loop.
6810 if (save_len == 0) {
6812 trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
6813 trace_consume(iter);
6817 /* In other cases, don't print partial lines */
6818 iter->seq.seq.len = save_len;
6821 if (ret != TRACE_TYPE_NO_CONSUME)
6822 trace_consume(iter);
6824 if (trace_seq_used(&iter->seq) >= cnt)
6828 * Setting the full flag means we reached the trace_seq buffer
6829 * size and should have left via the partial-output condition above.
6830 * If we get here, one of the trace_seq_* functions was not used properly.
6832 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6835 trace_access_unlock(iter->cpu_file);
6836 trace_event_read_unlock();
6838 /* Now copy what we have to the user */
6839 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6840 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6841 trace_seq_init(&iter->seq);
6844 * If there was nothing to send to the user, in spite of consuming trace
6845 * entries, go back to wait for more entries.
6851 mutex_unlock(&iter->mutex);
6856 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6859 __free_page(spd->pages[idx]);
6863 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6869 /* Seq buffer is page-sized, exactly what we need. */
6871 save_len = iter->seq.seq.len;
6872 ret = print_trace_line(iter);
6874 if (trace_seq_has_overflowed(&iter->seq)) {
6875 iter->seq.seq.len = save_len;
6880 * This should not be hit, because it should only
6881 * be set if the iter->seq overflowed. But check it
6882 * anyway to be safe.
6884 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6885 iter->seq.seq.len = save_len;
6889 count = trace_seq_used(&iter->seq) - save_len;
6892 iter->seq.seq.len = save_len;
6896 if (ret != TRACE_TYPE_NO_CONSUME)
6897 trace_consume(iter);
6899 if (!trace_find_next_entry_inc(iter)) {
6909 static ssize_t tracing_splice_read_pipe(struct file *filp,
6911 struct pipe_inode_info *pipe,
6915 struct page *pages_def[PIPE_DEF_BUFFERS];
6916 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6917 struct trace_iterator *iter = filp->private_data;
6918 struct splice_pipe_desc spd = {
6920 .partial = partial_def,
6921 .nr_pages = 0, /* This gets updated below. */
6922 .nr_pages_max = PIPE_DEF_BUFFERS,
6923 .ops = &default_pipe_buf_ops,
6924 .spd_release = tracing_spd_release_pipe,
6930 if (splice_grow_spd(pipe, &spd))
6933 mutex_lock(&iter->mutex);
6935 if (iter->trace->splice_read) {
6936 ret = iter->trace->splice_read(iter, filp,
6937 ppos, pipe, len, flags);
6942 ret = tracing_wait_pipe(filp);
6946 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6951 trace_event_read_lock();
6952 trace_access_lock(iter->cpu_file);
6954 /* Fill as many pages as possible. */
6955 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6956 spd.pages[i] = alloc_page(GFP_KERNEL);
6960 rem = tracing_fill_pipe_page(rem, iter);
6962 /* Copy the data into the page, so we can start over. */
6963 ret = trace_seq_to_buffer(&iter->seq,
6964 page_address(spd.pages[i]),
6965 trace_seq_used(&iter->seq));
6967 __free_page(spd.pages[i]);
6970 spd.partial[i].offset = 0;
6971 spd.partial[i].len = trace_seq_used(&iter->seq);
6973 trace_seq_init(&iter->seq);
6976 trace_access_unlock(iter->cpu_file);
6977 trace_event_read_unlock();
6978 mutex_unlock(&iter->mutex);
6983 ret = splice_to_pipe(pipe, &spd);
6987 splice_shrink_spd(&spd);
6991 mutex_unlock(&iter->mutex);
6996 tracing_entries_read(struct file *filp, char __user *ubuf,
6997 size_t cnt, loff_t *ppos)
6999 struct inode *inode = file_inode(filp);
7000 struct trace_array *tr = inode->i_private;
7001 int cpu = tracing_get_cpu(inode);
7006 mutex_lock(&trace_types_lock);
7008 if (cpu == RING_BUFFER_ALL_CPUS) {
7009 int cpu, buf_size_same;
7014 /* check if all cpu sizes are same */
7015 for_each_tracing_cpu(cpu) {
7016 /* fill in the size from first enabled cpu */
7018 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
7019 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
7025 if (buf_size_same) {
7026 if (!ring_buffer_expanded)
7027 r = sprintf(buf, "%lu (expanded: %lu)\n",
7029 trace_buf_size >> 10);
7031 r = sprintf(buf, "%lu\n", size >> 10);
7033 r = sprintf(buf, "X\n");
7035 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
7037 mutex_unlock(&trace_types_lock);
7039 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7044 tracing_entries_write(struct file *filp, const char __user *ubuf,
7045 size_t cnt, loff_t *ppos)
7047 struct inode *inode = file_inode(filp);
7048 struct trace_array *tr = inode->i_private;
7052 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7056 /* must have at least 1 entry */
7060 /* value is in KB */
7062 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
7072 tracing_total_entries_read(struct file *filp, char __user *ubuf,
7073 size_t cnt, loff_t *ppos)
7075 struct trace_array *tr = filp->private_data;
7078 unsigned long size = 0, expanded_size = 0;
7080 mutex_lock(&trace_types_lock);
7081 for_each_tracing_cpu(cpu) {
7082 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
7083 if (!ring_buffer_expanded)
7084 expanded_size += trace_buf_size >> 10;
7086 if (ring_buffer_expanded)
7087 r = sprintf(buf, "%lu\n", size);
7089 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
7090 mutex_unlock(&trace_types_lock);
7092 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7096 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
7097 size_t cnt, loff_t *ppos)
7100 * There is no need to read what the user has written; this function
7101 * exists just to make sure that there is no error when "echo" is used
7110 tracing_free_buffer_release(struct inode *inode, struct file *filp)
7112 struct trace_array *tr = inode->i_private;
7114 /* disable tracing? */
7115 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
7116 tracer_tracing_off(tr);
7117 /* resize the ring buffer to 0 */
7118 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
7120 trace_array_put(tr);
7126 tracing_mark_write(struct file *filp, const char __user *ubuf,
7127 size_t cnt, loff_t *fpos)
7129 struct trace_array *tr = filp->private_data;
7130 struct ring_buffer_event *event;
7131 enum event_trigger_type tt = ETT_NONE;
7132 struct trace_buffer *buffer;
7133 struct print_entry *entry;
7138 /* Used in tracing_mark_raw_write() as well */
7139 #define FAULTED_STR "<faulted>"
7140 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
7142 if (tracing_disabled)
7145 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7148 if (cnt > TRACE_BUF_SIZE)
7149 cnt = TRACE_BUF_SIZE;
7151 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7153 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
7155 /* If less than "<faulted>", then make sure we can still add that */
7156 if (cnt < FAULTED_SIZE)
7157 size += FAULTED_SIZE - cnt;
7159 buffer = tr->array_buffer.buffer;
7160 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
7162 if (unlikely(!event))
7163 /* Ring buffer disabled, return as if not open for write */
7166 entry = ring_buffer_event_data(event);
7167 entry->ip = _THIS_IP_;
7169 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
7171 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7177 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
7178 /* do not add \n before testing triggers, but add \0 */
7179 entry->buf[cnt] = '\0';
7180 tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event);
7183 if (entry->buf[cnt - 1] != '\n') {
7184 entry->buf[cnt] = '\n';
7185 entry->buf[cnt + 1] = '\0';
7187 entry->buf[cnt] = '\0';
7189 if (static_branch_unlikely(&trace_marker_exports_enabled))
7190 ftrace_exports(event, TRACE_EXPORT_MARKER);
7191 __buffer_unlock_commit(buffer, event);
7194 event_triggers_post_call(tr->trace_marker_file, tt);
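/*
 * Illustrative user-space counterpart (a sketch): annotating a trace by
 * writing to the trace_marker file:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	// Each write() becomes one TRACE_PRINT event; the kernel appends
 *	// a '\n' if the payload does not end with one.
 *	write(fd, "begin critical section", 22);
 *	close(fd);
 */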
7199 /* Limit it for now to 3K (including tag) */
7200 #define RAW_DATA_MAX_SIZE (1024*3)
7203 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
7204 size_t cnt, loff_t *fpos)
7206 struct trace_array *tr = filp->private_data;
7207 struct ring_buffer_event *event;
7208 struct trace_buffer *buffer;
7209 struct raw_data_entry *entry;
7214 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
7216 if (tracing_disabled)
7219 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
7222 /* The marker must at least have a tag id */
7223 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
7226 if (cnt > TRACE_BUF_SIZE)
7227 cnt = TRACE_BUF_SIZE;
7229 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
7231 size = sizeof(*entry) + cnt;
7232 if (cnt < FAULT_SIZE_ID)
7233 size += FAULT_SIZE_ID - cnt;
7235 buffer = tr->array_buffer.buffer;
7236 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
7239 /* Ring buffer disabled, return as if not open for write */
7242 entry = ring_buffer_event_data(event);
7244 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
7247 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
7252 __buffer_unlock_commit(buffer, event);
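/*
 * Illustrative userspace sketch (hypothetical struct): the raw marker
 * payload is binary and must begin with the tag id, matching the
 * cnt < sizeof(unsigned int) check above.
 *
 *	struct { unsigned int id; char data[8]; } m = { 42, "rawdata" };
 *	int fd = open("/sys/kernel/tracing/trace_marker_raw", O_WRONLY);
 *
 *	if (fd >= 0)
 *		write(fd, &m, sizeof(m));
 */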
7257 static int tracing_clock_show(struct seq_file *m, void *v)
7259 struct trace_array *tr = m->private;
7262 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
7264 "%s%s%s%s", i ? " " : "",
7265 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
7266 i == tr->clock_id ? "]" : "");
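/*
 * Example output (illustrative): reading trace_clock lists every entry
 * in trace_clocks[] and brackets the current selection, e.g.:
 *
 *	[local] global counter uptime perf mono mono_raw boot
 */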
7272 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
7276 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
7277 if (strcmp(trace_clocks[i].name, clockstr) == 0)
7280 if (i == ARRAY_SIZE(trace_clocks))
7283 mutex_lock(&trace_types_lock);
7287 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
7290 * The new clock may not be consistent with the previous clock.
7291 * Reset the buffer so that it doesn't have incomparable timestamps.
7293 tracing_reset_online_cpus(&tr->array_buffer);
7295 #ifdef CONFIG_TRACER_MAX_TRACE
7296 if (tr->max_buffer.buffer)
7297 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
7298 tracing_reset_online_cpus(&tr->max_buffer);
7301 mutex_unlock(&trace_types_lock);
7306 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
7307 size_t cnt, loff_t *fpos)
7309 struct seq_file *m = filp->private_data;
7310 struct trace_array *tr = m->private;
7312 const char *clockstr;
7315 if (cnt >= sizeof(buf))
7318 if (copy_from_user(buf, ubuf, cnt))
7323 clockstr = strstrip(buf);
7325 ret = tracing_set_clock(tr, clockstr);
7334 static int tracing_clock_open(struct inode *inode, struct file *file)
7336 struct trace_array *tr = inode->i_private;
7339 ret = tracing_check_open_get_tr(tr);
7343 ret = single_open(file, tracing_clock_show, inode->i_private);
7345 trace_array_put(tr);
7350 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
7352 struct trace_array *tr = m->private;
7354 mutex_lock(&trace_types_lock);
7356 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
7357 seq_puts(m, "delta [absolute]\n");
7359 seq_puts(m, "[delta] absolute\n");
7361 mutex_unlock(&trace_types_lock);
7366 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
7368 struct trace_array *tr = inode->i_private;
7371 ret = tracing_check_open_get_tr(tr);
7375 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
7377 trace_array_put(tr);
7382 u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe)
7384 if (rbe == this_cpu_read(trace_buffered_event))
7385 return ring_buffer_time_stamp(buffer);
7387 return ring_buffer_event_time_stamp(buffer, rbe);
7391 * Set or disable using the per CPU trace_buffered_event when possible.
7393 int tracing_set_filter_buffering(struct trace_array *tr, bool set)
7397 mutex_lock(&trace_types_lock);
7399 if (set && tr->no_filter_buffering_ref++)
7403 if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) {
7408 --tr->no_filter_buffering_ref;
7411 mutex_unlock(&trace_types_lock);
7416 struct ftrace_buffer_info {
7417 struct trace_iterator iter;
7419 unsigned int spare_cpu;
7423 #ifdef CONFIG_TRACER_SNAPSHOT
7424 static int tracing_snapshot_open(struct inode *inode, struct file *file)
7426 struct trace_array *tr = inode->i_private;
7427 struct trace_iterator *iter;
7431 ret = tracing_check_open_get_tr(tr);
7435 if (file->f_mode & FMODE_READ) {
7436 iter = __tracing_open(inode, file, true);
7438 ret = PTR_ERR(iter);
7440 /* Writes still need the seq_file to hold the private data */
7442 m = kzalloc(sizeof(*m), GFP_KERNEL);
7445 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
7453 iter->array_buffer = &tr->max_buffer;
7454 iter->cpu_file = tracing_get_cpu(inode);
7456 file->private_data = m;
7460 trace_array_put(tr);
7466 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
7469 struct seq_file *m = filp->private_data;
7470 struct trace_iterator *iter = m->private;
7471 struct trace_array *tr = iter->tr;
7475 ret = tracing_update_buffers();
7479 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7483 mutex_lock(&trace_types_lock);
7485 if (tr->current_trace->use_max_tr) {
7490 local_irq_disable();
7491 arch_spin_lock(&tr->max_lock);
7492 if (tr->cond_snapshot)
7494 arch_spin_unlock(&tr->max_lock);
7501 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7505 if (tr->allocated_snapshot)
7509 /* Only allow per-cpu swap if the ring buffer supports it */
7510 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7511 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7516 if (tr->allocated_snapshot)
7517 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7518 &tr->array_buffer, iter->cpu_file);
7520 ret = tracing_alloc_snapshot_instance(tr);
7523 local_irq_disable();
7524 /* Now, we're going to swap */
7525 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7526 update_max_tr(tr, current, smp_processor_id(), NULL);
7528 update_max_tr_single(tr, current, iter->cpu_file);
7532 if (tr->allocated_snapshot) {
7533 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7534 tracing_reset_online_cpus(&tr->max_buffer);
7536 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7546 mutex_unlock(&trace_types_lock);
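/*
 * Summary of the accepted values, per Documentation/trace/ftrace.rst:
 * writing 0 clears and frees the snapshot buffer, 1 allocates it (if
 * needed) and swaps it with the live buffer, and 2 clears it without
 * freeing. For example:
 *
 *	echo 1 > /sys/kernel/tracing/snapshot
 *	cat /sys/kernel/tracing/snapshot
 */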
7550 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7552 struct seq_file *m = file->private_data;
7555 ret = tracing_release(inode, file);
7557 if (file->f_mode & FMODE_READ)
7560 /* If write only, the seq_file is just a stub */
7568 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7569 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7570 size_t count, loff_t *ppos);
7571 static int tracing_buffers_release(struct inode *inode, struct file *file);
7572 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7573 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7575 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7577 struct ftrace_buffer_info *info;
7580 /* The following checks for tracefs lockdown */
7581 ret = tracing_buffers_open(inode, filp);
7585 info = filp->private_data;
7587 if (info->iter.trace->use_max_tr) {
7588 tracing_buffers_release(inode, filp);
7592 info->iter.snapshot = true;
7593 info->iter.array_buffer = &info->iter.tr->max_buffer;
7598 #endif /* CONFIG_TRACER_SNAPSHOT */
7601 static const struct file_operations tracing_thresh_fops = {
7602 .open = tracing_open_generic,
7603 .read = tracing_thresh_read,
7604 .write = tracing_thresh_write,
7605 .llseek = generic_file_llseek,
7608 #ifdef CONFIG_TRACER_MAX_TRACE
7609 static const struct file_operations tracing_max_lat_fops = {
7610 .open = tracing_open_generic,
7611 .read = tracing_max_lat_read,
7612 .write = tracing_max_lat_write,
7613 .llseek = generic_file_llseek,
7617 static const struct file_operations set_tracer_fops = {
7618 .open = tracing_open_generic,
7619 .read = tracing_set_trace_read,
7620 .write = tracing_set_trace_write,
7621 .llseek = generic_file_llseek,
7624 static const struct file_operations tracing_pipe_fops = {
7625 .open = tracing_open_pipe,
7626 .poll = tracing_poll_pipe,
7627 .read = tracing_read_pipe,
7628 .splice_read = tracing_splice_read_pipe,
7629 .release = tracing_release_pipe,
7630 .llseek = no_llseek,
7633 static const struct file_operations tracing_entries_fops = {
7634 .open = tracing_open_generic_tr,
7635 .read = tracing_entries_read,
7636 .write = tracing_entries_write,
7637 .llseek = generic_file_llseek,
7638 .release = tracing_release_generic_tr,
7641 static const struct file_operations tracing_total_entries_fops = {
7642 .open = tracing_open_generic_tr,
7643 .read = tracing_total_entries_read,
7644 .llseek = generic_file_llseek,
7645 .release = tracing_release_generic_tr,
7648 static const struct file_operations tracing_free_buffer_fops = {
7649 .open = tracing_open_generic_tr,
7650 .write = tracing_free_buffer_write,
7651 .release = tracing_free_buffer_release,
7654 static const struct file_operations tracing_mark_fops = {
7655 .open = tracing_mark_open,
7656 .write = tracing_mark_write,
7657 .release = tracing_release_generic_tr,
7660 static const struct file_operations tracing_mark_raw_fops = {
7661 .open = tracing_mark_open,
7662 .write = tracing_mark_raw_write,
7663 .release = tracing_release_generic_tr,
7666 static const struct file_operations trace_clock_fops = {
7667 .open = tracing_clock_open,
7669 .llseek = seq_lseek,
7670 .release = tracing_single_release_tr,
7671 .write = tracing_clock_write,
7674 static const struct file_operations trace_time_stamp_mode_fops = {
7675 .open = tracing_time_stamp_mode_open,
7677 .llseek = seq_lseek,
7678 .release = tracing_single_release_tr,
7681 #ifdef CONFIG_TRACER_SNAPSHOT
7682 static const struct file_operations snapshot_fops = {
7683 .open = tracing_snapshot_open,
7685 .write = tracing_snapshot_write,
7686 .llseek = tracing_lseek,
7687 .release = tracing_snapshot_release,
7690 static const struct file_operations snapshot_raw_fops = {
7691 .open = snapshot_raw_open,
7692 .read = tracing_buffers_read,
7693 .release = tracing_buffers_release,
7694 .splice_read = tracing_buffers_splice_read,
7695 .llseek = no_llseek,
7698 #endif /* CONFIG_TRACER_SNAPSHOT */
7701 * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
7702 * @filp: The active open file structure
7703 * @ubuf: The userspace provided buffer holding the value to write
7704 * @cnt: The number of bytes to read from @ubuf
7705 * @ppos: The current "file" position
7707 * This function implements the write interface for a struct trace_min_max_param.
7708 * The filp->private_data must point to a trace_min_max_param structure that
7709 * defines where to write the value, the min and the max acceptable values,
7710 * and a lock to protect the write.
7713 trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
7715 struct trace_min_max_param *param = filp->private_data;
7722 err = kstrtoull_from_user(ubuf, cnt, 10, &val);
7727 mutex_lock(param->lock);
7729 if (param->min && val < *param->min)
7732 if (param->max && val > *param->max)
7739 mutex_unlock(param->lock);
7748 * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
7749 * @filp: The active open file structure
7750 * @ubuf: The userspace provided buffer to read value into
7751 * @cnt: The maximum number of bytes to read
7752 * @ppos: The current "file" position
7754 * This function implements the read interface for a struct trace_min_max_param.
7755 * The filp->private_data must point to a trace_min_max_param struct with valid
7759 trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
7761 struct trace_min_max_param *param = filp->private_data;
7762 char buf[U64_STR_SIZE];
7771 if (cnt > sizeof(buf))
7774 len = snprintf(buf, sizeof(buf), "%llu\n", val);
7776 return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
7779 const struct file_operations trace_min_max_fops = {
7780 .open = tracing_open_generic,
7781 .read = trace_min_max_read,
7782 .write = trace_min_max_write,
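/*
 * Illustrative sketch (hypothetical names): wiring a clamped u64 knob
 * through trace_min_max_fops. The lock/val/min/max fields follow the
 * trace_min_max_param description above.
 *
 *	static u64 my_val, my_min = 1, my_max = 100;
 *	static DEFINE_MUTEX(my_lock);
 *
 *	static struct trace_min_max_param my_param = {
 *		.lock	= &my_lock,
 *		.val	= &my_val,
 *		.min	= &my_min,
 *		.max	= &my_max,
 *	};
 *
 *	trace_create_file("my_knob", TRACE_MODE_WRITE, parent,
 *			  &my_param, &trace_min_max_fops);
 */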
7785 #define TRACING_LOG_ERRS_MAX 8
7786 #define TRACING_LOG_LOC_MAX 128
7788 #define CMD_PREFIX " Command: "
7791 const char **errs; /* ptr to loc-specific array of err strings */
7792 u8 type; /* index into errs -> specific err string */
7793 u16 pos; /* caret position */
7797 struct tracing_log_err {
7798 struct list_head list;
7799 struct err_info info;
7800 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7801 char *cmd; /* what caused err */
7804 static DEFINE_MUTEX(tracing_err_log_lock);
7806 static struct tracing_log_err *alloc_tracing_log_err(int len)
7808 struct tracing_log_err *err;
7810 err = kzalloc(sizeof(*err), GFP_KERNEL);
7812 return ERR_PTR(-ENOMEM);
7814 err->cmd = kzalloc(len, GFP_KERNEL);
7817 return ERR_PTR(-ENOMEM);
7823 static void free_tracing_log_err(struct tracing_log_err *err)
7829 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr,
7832 struct tracing_log_err *err;
7835 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7836 err = alloc_tracing_log_err(len);
7837 if (PTR_ERR(err) != -ENOMEM)
7838 tr->n_err_log_entries++;
7842 cmd = kzalloc(len, GFP_KERNEL);
7844 return ERR_PTR(-ENOMEM);
7845 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7848 list_del(&err->list);
7854 * err_pos - find the position of a string within a command for error careting
7855 * @cmd: The tracing command that caused the error
7856 * @str: The string to position the caret at within @cmd
7858 * Finds the position of the first occurrence of @str within @cmd. The
7859 * return value can be passed to tracing_log_err() for caret placement
7862 * Returns the index within @cmd of the first occurrence of @str or 0
7863 * if @str was not found.
7865 unsigned int err_pos(char *cmd, const char *str)
7869 if (WARN_ON(!strlen(cmd)))
7872 found = strstr(cmd, str);
7880 * tracing_log_err - write an error to the tracing error log
7881 * @tr: The associated trace array for the error (NULL for top level array)
7882 * @loc: A string describing where the error occurred
7883 * @cmd: The tracing command that caused the error
7884 * @errs: The array of loc-specific static error strings
7885 * @type: The index into errs[], which produces the specific static err string
7886 * @pos: The position the caret should be placed in the cmd
7888 * Writes an error into tracing/error_log of the form:
7890 * <loc>: error: <text>
7894 * tracing/error_log is a small log file containing the last
7895 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7896 * unless there has been a tracing error, and the error log can be
7897 * cleared and have its memory freed by writing the empty string to
7898 * it in truncation mode, i.e. echo > tracing/error_log.
7900 * NOTE: the @errs array along with the @type param are used to
7901 * produce a static error string - this string is not copied and saved
7902 * when the error is logged - only a pointer to it is saved. See
7903 * existing callers for examples of how static strings are typically
7904 * defined for use with tracing_log_err().
7906 void tracing_log_err(struct trace_array *tr,
7907 const char *loc, const char *cmd,
7908 const char **errs, u8 type, u16 pos)
7910 struct tracing_log_err *err;
7916 len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1;
7918 mutex_lock(&tracing_err_log_lock);
7919 err = get_tracing_log_err(tr, len);
7920 if (PTR_ERR(err) == -ENOMEM) {
7921 mutex_unlock(&tracing_err_log_lock);
7925 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7926 snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd);
7928 err->info.errs = errs;
7929 err->info.type = type;
7930 err->info.pos = pos;
7931 err->info.ts = local_clock();
7933 list_add_tail(&err->list, &tr->err_log);
7934 mutex_unlock(&tracing_err_log_lock);
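/*
 * Illustrative caller sketch (hypothetical strings): the errs array must
 * be static since only its pointer is saved, and err_pos() supplies the
 * caret column within cmd.
 *
 *	static const char *my_errs[] = { "Field not found", "Bad action" };
 *
 *	tracing_log_err(tr, "hist:sched:sched_switch", cmd,
 *			my_errs, 0, err_pos(cmd, "bad_field"));
 */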
7937 static void clear_tracing_err_log(struct trace_array *tr)
7939 struct tracing_log_err *err, *next;
7941 mutex_lock(&tracing_err_log_lock);
7942 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7943 list_del(&err->list);
7944 free_tracing_log_err(err);
7947 tr->n_err_log_entries = 0;
7948 mutex_unlock(&tracing_err_log_lock);
7951 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7953 struct trace_array *tr = m->private;
7955 mutex_lock(&tracing_err_log_lock);
7957 return seq_list_start(&tr->err_log, *pos);
7960 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7962 struct trace_array *tr = m->private;
7964 return seq_list_next(v, &tr->err_log, pos);
7967 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7969 mutex_unlock(&tracing_err_log_lock);
7972 static void tracing_err_log_show_pos(struct seq_file *m, u16 pos)
7976 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7978 for (i = 0; i < pos; i++)
7983 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7985 struct tracing_log_err *err = v;
7988 const char *err_text = err->info.errs[err->info.type];
7989 u64 sec = err->info.ts;
7992 nsec = do_div(sec, NSEC_PER_SEC);
7993 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7994 err->loc, err_text);
7995 seq_printf(m, "%s", err->cmd);
7996 tracing_err_log_show_pos(m, err->info.pos);
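/*
 * Illustrative output from the show function above: timestamp, location,
 * static error text, then the offending command with a caret under the
 * error position:
 *
 *	[   12.345678] hist:sched:sched_switch: error: Field not found
 *	  Command: hist:keys=bad_field
 *	                     ^
 */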
8002 static const struct seq_operations tracing_err_log_seq_ops = {
8003 .start = tracing_err_log_seq_start,
8004 .next = tracing_err_log_seq_next,
8005 .stop = tracing_err_log_seq_stop,
8006 .show = tracing_err_log_seq_show
8009 static int tracing_err_log_open(struct inode *inode, struct file *file)
8011 struct trace_array *tr = inode->i_private;
8014 ret = tracing_check_open_get_tr(tr);
8018 /* If this file was opened for write, then erase contents */
8019 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
8020 clear_tracing_err_log(tr);
8022 if (file->f_mode & FMODE_READ) {
8023 ret = seq_open(file, &tracing_err_log_seq_ops);
8025 struct seq_file *m = file->private_data;
8028 trace_array_put(tr);
8034 static ssize_t tracing_err_log_write(struct file *file,
8035 const char __user *buffer,
8036 size_t count, loff_t *ppos)
8041 static int tracing_err_log_release(struct inode *inode, struct file *file)
8043 struct trace_array *tr = inode->i_private;
8045 trace_array_put(tr);
8047 if (file->f_mode & FMODE_READ)
8048 seq_release(inode, file);
8053 static const struct file_operations tracing_err_log_fops = {
8054 .open = tracing_err_log_open,
8055 .write = tracing_err_log_write,
8057 .llseek = seq_lseek,
8058 .release = tracing_err_log_release,
8061 static int tracing_buffers_open(struct inode *inode, struct file *filp)
8063 struct trace_array *tr = inode->i_private;
8064 struct ftrace_buffer_info *info;
8067 ret = tracing_check_open_get_tr(tr);
8071 info = kvzalloc(sizeof(*info), GFP_KERNEL);
8073 trace_array_put(tr);
8077 mutex_lock(&trace_types_lock);
8080 info->iter.cpu_file = tracing_get_cpu(inode);
8081 info->iter.trace = tr->current_trace;
8082 info->iter.array_buffer = &tr->array_buffer;
8084 /* Force reading ring buffer for first read */
8085 info->read = (unsigned int)-1;
8087 filp->private_data = info;
8091 mutex_unlock(&trace_types_lock);
8093 ret = nonseekable_open(inode, filp);
8095 trace_array_put(tr);
8101 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
8103 struct ftrace_buffer_info *info = filp->private_data;
8104 struct trace_iterator *iter = &info->iter;
8106 return trace_poll(iter, filp, poll_table);
8110 tracing_buffers_read(struct file *filp, char __user *ubuf,
8111 size_t count, loff_t *ppos)
8113 struct ftrace_buffer_info *info = filp->private_data;
8114 struct trace_iterator *iter = &info->iter;
8121 #ifdef CONFIG_TRACER_MAX_TRACE
8122 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8127 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
8129 if (IS_ERR(info->spare)) {
8130 ret = PTR_ERR(info->spare);
8133 info->spare_cpu = iter->cpu_file;
8139 /* Do we still have previously read data to consume? */
8140 if (info->read < PAGE_SIZE)
8144 trace_access_lock(iter->cpu_file);
8145 ret = ring_buffer_read_page(iter->array_buffer->buffer,
8149 trace_access_unlock(iter->cpu_file);
8152 if (trace_empty(iter)) {
8153 if ((filp->f_flags & O_NONBLOCK))
8156 ret = wait_on_pipe(iter, 0);
8167 size = PAGE_SIZE - info->read;
8171 ret = copy_to_user(ubuf, info->spare + info->read, size);
8183 static int tracing_buffers_release(struct inode *inode, struct file *file)
8185 struct ftrace_buffer_info *info = file->private_data;
8186 struct trace_iterator *iter = &info->iter;
8188 mutex_lock(&trace_types_lock);
8190 iter->tr->trace_ref--;
8192 __trace_array_put(iter->tr);
8195 /* Make sure the waiters see the new wait_index */
8198 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8201 ring_buffer_free_read_page(iter->array_buffer->buffer,
8202 info->spare_cpu, info->spare);
8205 mutex_unlock(&trace_types_lock);
8211 struct trace_buffer *buffer;
8214 refcount_t refcount;
8217 static void buffer_ref_release(struct buffer_ref *ref)
8219 if (!refcount_dec_and_test(&ref->refcount))
8221 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
8225 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
8226 struct pipe_buffer *buf)
8228 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8230 buffer_ref_release(ref);
8234 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
8235 struct pipe_buffer *buf)
8237 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
8239 if (refcount_read(&ref->refcount) > INT_MAX/2)
8242 refcount_inc(&ref->refcount);
8246 /* Pipe buffer operations for a buffer. */
8247 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
8248 .release = buffer_pipe_buf_release,
8249 .get = buffer_pipe_buf_get,
8253 * Callback from splice_to_pipe(), if we need to release some pages
8254 * at the end of the spd in case we errored out while filling the pipe.
8256 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
8258 struct buffer_ref *ref =
8259 (struct buffer_ref *)spd->partial[i].private;
8261 buffer_ref_release(ref);
8262 spd->partial[i].private = 0;
8266 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
8267 struct pipe_inode_info *pipe, size_t len,
8270 struct ftrace_buffer_info *info = file->private_data;
8271 struct trace_iterator *iter = &info->iter;
8272 struct partial_page partial_def[PIPE_DEF_BUFFERS];
8273 struct page *pages_def[PIPE_DEF_BUFFERS];
8274 struct splice_pipe_desc spd = {
8276 .partial = partial_def,
8277 .nr_pages_max = PIPE_DEF_BUFFERS,
8278 .ops = &buffer_pipe_buf_ops,
8279 .spd_release = buffer_spd_release,
8281 struct buffer_ref *ref;
8285 #ifdef CONFIG_TRACER_MAX_TRACE
8286 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
8290 if (*ppos & (PAGE_SIZE - 1))
8293 if (len & (PAGE_SIZE - 1)) {
8294 if (len < PAGE_SIZE)
8299 if (splice_grow_spd(pipe, &spd))
8303 trace_access_lock(iter->cpu_file);
8304 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8306 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
8310 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
8316 refcount_set(&ref->refcount, 1);
8317 ref->buffer = iter->array_buffer->buffer;
8318 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
8319 if (IS_ERR(ref->page)) {
8320 ret = PTR_ERR(ref->page);
8325 ref->cpu = iter->cpu_file;
8327 r = ring_buffer_read_page(ref->buffer, &ref->page,
8328 len, iter->cpu_file, 1);
8330 ring_buffer_free_read_page(ref->buffer, ref->cpu,
8336 page = virt_to_page(ref->page);
8338 spd.pages[i] = page;
8339 spd.partial[i].len = PAGE_SIZE;
8340 spd.partial[i].offset = 0;
8341 spd.partial[i].private = (unsigned long)ref;
8345 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
8348 trace_access_unlock(iter->cpu_file);
8351 /* did we read anything? */
8352 if (!spd.nr_pages) {
8359 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
8362 wait_index = READ_ONCE(iter->wait_index);
8364 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
8368 /* No need to wait after waking up when tracing is off */
8369 if (!tracer_tracing_is_on(iter->tr))
8372 /* Make sure we see the new wait_index */
8374 if (wait_index != iter->wait_index)
8380 ret = splice_to_pipe(pipe, &spd);
8382 splice_shrink_spd(&spd);
8387 /* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
8388 static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8390 struct ftrace_buffer_info *info = file->private_data;
8391 struct trace_iterator *iter = &info->iter;
8394 return -ENOIOCTLCMD;
8396 mutex_lock(&trace_types_lock);
8399 /* Make sure the waiters see the new wait_index */
8402 ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file);
8404 mutex_unlock(&trace_types_lock);
8408 static const struct file_operations tracing_buffers_fops = {
8409 .open = tracing_buffers_open,
8410 .read = tracing_buffers_read,
8411 .poll = tracing_buffers_poll,
8412 .release = tracing_buffers_release,
8413 .splice_read = tracing_buffers_splice_read,
8414 .unlocked_ioctl = tracing_buffers_ioctl,
8415 .llseek = no_llseek,
8419 tracing_stats_read(struct file *filp, char __user *ubuf,
8420 size_t count, loff_t *ppos)
8422 struct inode *inode = file_inode(filp);
8423 struct trace_array *tr = inode->i_private;
8424 struct array_buffer *trace_buf = &tr->array_buffer;
8425 int cpu = tracing_get_cpu(inode);
8426 struct trace_seq *s;
8428 unsigned long long t;
8429 unsigned long usec_rem;
8431 s = kmalloc(sizeof(*s), GFP_KERNEL);
8437 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
8438 trace_seq_printf(s, "entries: %ld\n", cnt);
8440 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
8441 trace_seq_printf(s, "overrun: %ld\n", cnt);
8443 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
8444 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
8446 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
8447 trace_seq_printf(s, "bytes: %ld\n", cnt);
8449 if (trace_clocks[tr->clock_id].in_ns) {
8450 /* local or global for trace_clock */
8451 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8452 usec_rem = do_div(t, USEC_PER_SEC);
8453 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
8456 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer));
8457 usec_rem = do_div(t, USEC_PER_SEC);
8458 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
8460 /* counter or tsc mode for trace_clock */
8461 trace_seq_printf(s, "oldest event ts: %llu\n",
8462 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
8464 trace_seq_printf(s, "now ts: %llu\n",
8465 ring_buffer_time_stamp(trace_buf->buffer));
8468 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
8469 trace_seq_printf(s, "dropped events: %ld\n", cnt);
8471 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
8472 trace_seq_printf(s, "read events: %ld\n", cnt);
8474 count = simple_read_from_buffer(ubuf, count, ppos,
8475 s->buffer, trace_seq_used(s));
8482 static const struct file_operations tracing_stats_fops = {
8483 .open = tracing_open_generic_tr,
8484 .read = tracing_stats_read,
8485 .llseek = generic_file_llseek,
8486 .release = tracing_release_generic_tr,
8489 #ifdef CONFIG_DYNAMIC_FTRACE
8492 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
8493 size_t cnt, loff_t *ppos)
8499 /* 256 should be plenty to hold the amount needed */
8500 buf = kmalloc(256, GFP_KERNEL);
8504 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
8505 ftrace_update_tot_cnt,
8506 ftrace_number_of_pages,
8507 ftrace_number_of_groups);
8509 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8514 static const struct file_operations tracing_dyn_info_fops = {
8515 .open = tracing_open_generic,
8516 .read = tracing_read_dyn_info,
8517 .llseek = generic_file_llseek,
8519 #endif /* CONFIG_DYNAMIC_FTRACE */
8521 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
8523 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
8524 struct trace_array *tr, struct ftrace_probe_ops *ops,
8527 tracing_snapshot_instance(tr);
8531 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
8532 struct trace_array *tr, struct ftrace_probe_ops *ops,
8535 struct ftrace_func_mapper *mapper = data;
8539 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8549 tracing_snapshot_instance(tr);
8553 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
8554 struct ftrace_probe_ops *ops, void *data)
8556 struct ftrace_func_mapper *mapper = data;
8559 seq_printf(m, "%ps:", (void *)ip);
8561 seq_puts(m, "snapshot");
8564 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
8567 seq_printf(m, ":count=%ld\n", *count);
8569 seq_puts(m, ":unlimited\n");
8575 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
8576 unsigned long ip, void *init_data, void **data)
8578 struct ftrace_func_mapper *mapper = *data;
8581 mapper = allocate_ftrace_func_mapper();
8587 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
8591 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
8592 unsigned long ip, void *data)
8594 struct ftrace_func_mapper *mapper = data;
8599 free_ftrace_func_mapper(mapper, NULL);
8603 ftrace_func_mapper_remove_ip(mapper, ip);
8606 static struct ftrace_probe_ops snapshot_probe_ops = {
8607 .func = ftrace_snapshot,
8608 .print = ftrace_snapshot_print,
8611 static struct ftrace_probe_ops snapshot_count_probe_ops = {
8612 .func = ftrace_count_snapshot,
8613 .print = ftrace_snapshot_print,
8614 .init = ftrace_snapshot_init,
8615 .free = ftrace_snapshot_free,
8619 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
8620 char *glob, char *cmd, char *param, int enable)
8622 struct ftrace_probe_ops *ops;
8623 void *count = (void *)-1;
8630 /* hash funcs only work with set_ftrace_filter */
8634 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8637 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8642 number = strsep(¶m, ":");
8644 if (!strlen(number))
8648 * We use the callback data field (which is a pointer)
8651 ret = kstrtoul(number, 0, (unsigned long *)&count);
8656 ret = tracing_alloc_snapshot_instance(tr);
8660 ret = register_ftrace_function_probe(glob, tr, ops, count);
8663 return ret < 0 ? ret : 0;
8666 static struct ftrace_func_command ftrace_snapshot_cmd = {
8668 .func = ftrace_trace_snapshot_callback,
8671 static __init int register_snapshot_cmd(void)
8673 return register_ftrace_command(&ftrace_snapshot_cmd);
8676 static inline __init int register_snapshot_cmd(void) { return 0; }
8677 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
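/*
 * Usage sketch for the "snapshot" function command registered above
 * (see Documentation/trace/ftrace.rst); <func> is a placeholder:
 *
 *	echo '<func>:snapshot' > set_ftrace_filter	(snapshot on every call)
 *	echo '<func>:snapshot:3' > set_ftrace_filter	(only the first 3 calls)
 *	echo '!<func>:snapshot' >> set_ftrace_filter	(remove the probe)
 */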
8679 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8681 if (WARN_ON(!tr->dir))
8682 return ERR_PTR(-ENODEV);
8684 /* Top directory uses NULL as the parent */
8685 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8688 /* All sub buffers have a descriptor */
8692 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8694 struct dentry *d_tracer;
8697 return tr->percpu_dir;
8699 d_tracer = tracing_get_dentry(tr);
8700 if (IS_ERR(d_tracer))
8703 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8705 MEM_FAIL(!tr->percpu_dir,
8706 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8708 return tr->percpu_dir;
8711 static struct dentry *
8712 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8713 void *data, long cpu, const struct file_operations *fops)
8715 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8717 if (ret) /* See tracing_get_cpu() */
8718 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8723 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8725 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8726 struct dentry *d_cpu;
8727 char cpu_dir[30]; /* 30 characters should be more than enough */
8732 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8733 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8735 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8739 /* per cpu trace_pipe */
8740 trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu,
8741 tr, cpu, &tracing_pipe_fops);
8744 trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu,
8745 tr, cpu, &tracing_fops);
8747 trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu,
8748 tr, cpu, &tracing_buffers_fops);
8750 trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu,
8751 tr, cpu, &tracing_stats_fops);
8753 trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu,
8754 tr, cpu, &tracing_entries_fops);
8756 #ifdef CONFIG_TRACER_SNAPSHOT
8757 trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu,
8758 tr, cpu, &snapshot_fops);
8760 trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu,
8761 tr, cpu, &snapshot_raw_fops);
8765 #ifdef CONFIG_FTRACE_SELFTEST
8766 /* Let selftest have access to static functions in this file */
8767 #include "trace_selftest.c"
8771 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8774 struct trace_option_dentry *topt = filp->private_data;
8777 if (topt->flags->val & topt->opt->bit)
8782 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8786 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8789 struct trace_option_dentry *topt = filp->private_data;
8793 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8797 if (val != 0 && val != 1)
8800 if (!!(topt->flags->val & topt->opt->bit) != val) {
8801 mutex_lock(&trace_types_lock);
8802 ret = __set_tracer_option(topt->tr, topt->flags,
8804 mutex_unlock(&trace_types_lock);
8815 static const struct file_operations trace_options_fops = {
8816 .open = tracing_open_generic,
8817 .read = trace_options_read,
8818 .write = trace_options_write,
8819 .llseek = generic_file_llseek,
8823 * In order to pass in both the trace_array descriptor as well as the index
8824 * to the flag that the trace option file represents, the trace_array
8825 * has a character array of trace_flags_index[], which holds the index
8826 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8827 * The address of this character array is passed to the flag option file
8828 * read/write callbacks.
8830 * In order to extract both the index and the trace_array descriptor,
8831 * get_tr_index() uses the following algorithm.
8835 * As the pointer itself contains the address of the index (remember
8838 * Then to get the trace_array descriptor, by subtracting that index
8839 * from the ptr, we get to the start of the index itself.
8841 * ptr - idx == &index[0]
8843 * Then a simple container_of() from that pointer gets us to the
8844 * trace_array descriptor.
8846 static void get_tr_index(void *data, struct trace_array **ptr,
8847 unsigned int *pindex)
8849 *pindex = *(unsigned char *)data;
8851 *ptr = container_of(data - *pindex, struct trace_array,
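/*
 * Worked example (illustrative): if data points at
 * &tr->trace_flags_index[3], then *pindex == 3 and data - 3 is
 * &tr->trace_flags_index[0], from which container_of() recovers tr.
 */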
8856 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8859 void *tr_index = filp->private_data;
8860 struct trace_array *tr;
8864 get_tr_index(tr_index, &tr, &index);
8866 if (tr->trace_flags & (1 << index))
8871 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8875 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8878 void *tr_index = filp->private_data;
8879 struct trace_array *tr;
8884 get_tr_index(tr_index, &tr, &index);
8886 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8890 if (val != 0 && val != 1)
8893 mutex_lock(&event_mutex);
8894 mutex_lock(&trace_types_lock);
8895 ret = set_tracer_flag(tr, 1 << index, val);
8896 mutex_unlock(&trace_types_lock);
8897 mutex_unlock(&event_mutex);
8907 static const struct file_operations trace_options_core_fops = {
8908 .open = tracing_open_generic,
8909 .read = trace_options_core_read,
8910 .write = trace_options_core_write,
8911 .llseek = generic_file_llseek,
8914 struct dentry *trace_create_file(const char *name,
8916 struct dentry *parent,
8918 const struct file_operations *fops)
8922 ret = tracefs_create_file(name, mode, parent, data, fops);
8924 pr_warn("Could not create tracefs '%s' entry\n", name);
8930 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8932 struct dentry *d_tracer;
8937 d_tracer = tracing_get_dentry(tr);
8938 if (IS_ERR(d_tracer))
8941 tr->options = tracefs_create_dir("options", d_tracer);
8943 pr_warn("Could not create tracefs directory 'options'\n");
8951 create_trace_option_file(struct trace_array *tr,
8952 struct trace_option_dentry *topt,
8953 struct tracer_flags *flags,
8954 struct tracer_opt *opt)
8956 struct dentry *t_options;
8958 t_options = trace_options_init_dentry(tr);
8962 topt->flags = flags;
8966 topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE,
8967 t_options, topt, &trace_options_fops);
8972 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8974 struct trace_option_dentry *topts;
8975 struct trace_options *tr_topts;
8976 struct tracer_flags *flags;
8977 struct tracer_opt *opts;
8984 flags = tracer->flags;
8986 if (!flags || !flags->opts)
8990 * If this is an instance, only create flags for tracers
8991 * the instance may have.
8993 if (!trace_ok_for_array(tracer, tr))
8996 for (i = 0; i < tr->nr_topts; i++) {
8997 /* Make sure there are no duplicate flags. */
8998 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
9004 for (cnt = 0; opts[cnt].name; cnt++)
9007 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
9011 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
9018 tr->topts = tr_topts;
9019 tr->topts[tr->nr_topts].tracer = tracer;
9020 tr->topts[tr->nr_topts].topts = topts;
9023 for (cnt = 0; opts[cnt].name; cnt++) {
9024 create_trace_option_file(tr, &topts[cnt], flags,
9026 MEM_FAIL(topts[cnt].entry == NULL,
9027 "Failed to create trace option: %s",
9032 static struct dentry *
9033 create_trace_option_core_file(struct trace_array *tr,
9034 const char *option, long index)
9036 struct dentry *t_options;
9038 t_options = trace_options_init_dentry(tr);
9042 return trace_create_file(option, TRACE_MODE_WRITE, t_options,
9043 (void *)&tr->trace_flags_index[index],
9044 &trace_options_core_fops);
9047 static void create_trace_options_dir(struct trace_array *tr)
9049 struct dentry *t_options;
9050 bool top_level = tr == &global_trace;
9053 t_options = trace_options_init_dentry(tr);
9057 for (i = 0; trace_options[i]; i++) {
9059 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
9060 create_trace_option_core_file(tr, trace_options[i], i);
9065 rb_simple_read(struct file *filp, char __user *ubuf,
9066 size_t cnt, loff_t *ppos)
9068 struct trace_array *tr = filp->private_data;
9072 r = tracer_tracing_is_on(tr);
9073 r = sprintf(buf, "%d\n", r);
9075 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9079 rb_simple_write(struct file *filp, const char __user *ubuf,
9080 size_t cnt, loff_t *ppos)
9082 struct trace_array *tr = filp->private_data;
9083 struct trace_buffer *buffer = tr->array_buffer.buffer;
9087 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9092 mutex_lock(&trace_types_lock);
9093 if (!!val == tracer_tracing_is_on(tr)) {
9094 val = 0; /* do nothing */
9096 tracer_tracing_on(tr);
9097 if (tr->current_trace->start)
9098 tr->current_trace->start(tr);
9100 tracer_tracing_off(tr);
9101 if (tr->current_trace->stop)
9102 tr->current_trace->stop(tr);
9103 /* Wake up any waiters */
9104 ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
9106 mutex_unlock(&trace_types_lock);
9114 static const struct file_operations rb_simple_fops = {
9115 .open = tracing_open_generic_tr,
9116 .read = rb_simple_read,
9117 .write = rb_simple_write,
9118 .release = tracing_release_generic_tr,
9119 .llseek = default_llseek,
9123 buffer_percent_read(struct file *filp, char __user *ubuf,
9124 size_t cnt, loff_t *ppos)
9126 struct trace_array *tr = filp->private_data;
9130 r = tr->buffer_percent;
9131 r = sprintf(buf, "%d\n", r);
9133 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
9137 buffer_percent_write(struct file *filp, const char __user *ubuf,
9138 size_t cnt, loff_t *ppos)
9140 struct trace_array *tr = filp->private_data;
9144 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
9151 tr->buffer_percent = val;
9158 static const struct file_operations buffer_percent_fops = {
9159 .open = tracing_open_generic_tr,
9160 .read = buffer_percent_read,
9161 .write = buffer_percent_write,
9162 .release = tracing_release_generic_tr,
9163 .llseek = default_llseek,
9166 static struct dentry *trace_instance_dir;
9169 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
9172 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
9174 enum ring_buffer_flags rb_flags;
9176 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
9180 buf->buffer = ring_buffer_alloc(size, rb_flags);
9184 buf->data = alloc_percpu(struct trace_array_cpu);
9186 ring_buffer_free(buf->buffer);
9191 /* Allocate the first page for all buffers */
9192 set_buffer_entries(&tr->array_buffer,
9193 ring_buffer_size(tr->array_buffer.buffer, 0));
9198 static void free_trace_buffer(struct array_buffer *buf)
9201 ring_buffer_free(buf->buffer);
9203 free_percpu(buf->data);
9208 static int allocate_trace_buffers(struct trace_array *tr, int size)
9212 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
9216 #ifdef CONFIG_TRACER_MAX_TRACE
9217 ret = allocate_trace_buffer(tr, &tr->max_buffer,
9218 allocate_snapshot ? size : 1);
9219 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
9220 free_trace_buffer(&tr->array_buffer);
9223 tr->allocated_snapshot = allocate_snapshot;
9226 * Only the top level trace array gets its snapshot allocated
9227 * from the kernel command line.
9229 allocate_snapshot = false;
9235 static void free_trace_buffers(struct trace_array *tr)
9240 free_trace_buffer(&tr->array_buffer);
9242 #ifdef CONFIG_TRACER_MAX_TRACE
9243 free_trace_buffer(&tr->max_buffer);
9247 static void init_trace_flags_index(struct trace_array *tr)
9251 /* Used by the trace options files */
9252 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
9253 tr->trace_flags_index[i] = i;
9256 static void __update_tracer_options(struct trace_array *tr)
9260 for (t = trace_types; t; t = t->next)
9261 add_tracer_options(tr, t);
9264 static void update_tracer_options(struct trace_array *tr)
9266 mutex_lock(&trace_types_lock);
9267 tracer_options_updated = true;
9268 __update_tracer_options(tr);
9269 mutex_unlock(&trace_types_lock);
9272 /* Must have trace_types_lock held */
9273 struct trace_array *trace_array_find(const char *instance)
9275 struct trace_array *tr, *found = NULL;
9277 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9278 if (tr->name && strcmp(tr->name, instance) == 0) {
9287 struct trace_array *trace_array_find_get(const char *instance)
9289 struct trace_array *tr;
9291 mutex_lock(&trace_types_lock);
9292 tr = trace_array_find(instance);
9295 mutex_unlock(&trace_types_lock);
9300 static int trace_array_create_dir(struct trace_array *tr)
9304 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
9308 ret = event_trace_add_tracer(tr->dir, tr);
9310 tracefs_remove(tr->dir);
9314 init_tracer_tracefs(tr, tr->dir);
9315 __update_tracer_options(tr);
9320 static struct trace_array *trace_array_create(const char *name)
9322 struct trace_array *tr;
9326 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
9328 return ERR_PTR(ret);
9330 tr->name = kstrdup(name, GFP_KERNEL);
9334 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
9337 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
9339 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
9341 raw_spin_lock_init(&tr->start_lock);
9343 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9345 tr->current_trace = &nop_trace;
9347 INIT_LIST_HEAD(&tr->systems);
9348 INIT_LIST_HEAD(&tr->events);
9349 INIT_LIST_HEAD(&tr->hist_vars);
9350 INIT_LIST_HEAD(&tr->err_log);
9352 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
9355 if (ftrace_allocate_ftrace_ops(tr) < 0)
9358 ftrace_init_trace_array(tr);
9360 init_trace_flags_index(tr);
9362 if (trace_instance_dir) {
9363 ret = trace_array_create_dir(tr);
9367 __trace_early_add_events(tr);
9369 list_add(&tr->list, &ftrace_trace_arrays);
9376 ftrace_free_ftrace_ops(tr);
9377 free_trace_buffers(tr);
9378 free_cpumask_var(tr->tracing_cpumask);
9382 return ERR_PTR(ret);
9385 static int instance_mkdir(const char *name)
9387 struct trace_array *tr;
9390 mutex_lock(&event_mutex);
9391 mutex_lock(&trace_types_lock);
9394 if (trace_array_find(name))
9397 tr = trace_array_create(name);
9399 ret = PTR_ERR_OR_ZERO(tr);
9402 mutex_unlock(&trace_types_lock);
9403 mutex_unlock(&event_mutex);
9408 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
9409 * @name: The name of the trace array to be looked up/created.
9411 * Returns a pointer to the trace array with the given name, or
9412 * NULL if it cannot be created.
9414 * NOTE: This function increments the reference counter associated with the
9415 * trace array returned. This makes sure it cannot be freed while in use.
9416 * Use trace_array_put() once the trace array is no longer needed.
9417 * If the trace_array is to be freed, trace_array_destroy() needs to
9418 * be called after the trace_array_put(), or simply let user space delete
9419 * it from the tracefs instances directory. But until the
9420 * trace_array_put() is called, user space cannot delete it.
9423 struct trace_array *trace_array_get_by_name(const char *name)
9425 struct trace_array *tr;
9427 mutex_lock(&event_mutex);
9428 mutex_lock(&trace_types_lock);
9430 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9431 if (tr->name && strcmp(tr->name, name) == 0)
9435 tr = trace_array_create(name);
9443 mutex_unlock(&trace_types_lock);
9444 mutex_unlock(&event_mutex);
9447 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
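/*
 * Illustrative module-side sketch: create or look up an instance, use
 * it, then drop the reference; destroy only if the instance should go
 * away entirely. Error handling is elided.
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (tr) {
 *		... enable events, use the instance ...
 *		trace_array_put(tr);
 *		trace_array_destroy(tr);
 *	}
 */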
9449 static int __remove_instance(struct trace_array *tr)
9453 /* Reference counter for a newly created trace array = 1. */
9454 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
9457 list_del(&tr->list);
9459 /* Disable all the flags that were enabled coming in */
9460 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
9461 if ((1 << i) & ZEROED_TRACE_FLAGS)
9462 set_tracer_flag(tr, 1 << i, 0);
9465 tracing_set_nop(tr);
9466 clear_ftrace_function_probes(tr);
9467 event_trace_del_tracer(tr);
9468 ftrace_clear_pids(tr);
9469 ftrace_destroy_function_files(tr);
9470 tracefs_remove(tr->dir);
9471 free_percpu(tr->last_func_repeats);
9472 free_trace_buffers(tr);
9474 for (i = 0; i < tr->nr_topts; i++) {
9475 kfree(tr->topts[i].topts);
9479 free_cpumask_var(tr->tracing_cpumask);
9486 int trace_array_destroy(struct trace_array *this_tr)
9488 struct trace_array *tr;
9494 mutex_lock(&event_mutex);
9495 mutex_lock(&trace_types_lock);
9499 /* Make sure the trace array exists before destroying it. */
9500 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9501 if (tr == this_tr) {
9502 ret = __remove_instance(tr);
9507 mutex_unlock(&trace_types_lock);
9508 mutex_unlock(&event_mutex);
9512 EXPORT_SYMBOL_GPL(trace_array_destroy);
9514 static int instance_rmdir(const char *name)
9516 struct trace_array *tr;
9519 mutex_lock(&event_mutex);
9520 mutex_lock(&trace_types_lock);
9523 tr = trace_array_find(name);
9525 ret = __remove_instance(tr);
9527 mutex_unlock(&trace_types_lock);
9528 mutex_unlock(&event_mutex);
9533 static __init void create_trace_instances(struct dentry *d_tracer)
9535 struct trace_array *tr;
9537 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
9540 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
9543 mutex_lock(&event_mutex);
9544 mutex_lock(&trace_types_lock);
9546 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
9549 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
9550 "Failed to create instance directory\n"))
9554 mutex_unlock(&trace_types_lock);
9555 mutex_unlock(&event_mutex);
9559 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
9561 struct trace_event_file *file;
9564 trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer,
9565 tr, &show_traces_fops);
9567 trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer,
9568 tr, &set_tracer_fops);
9570 trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer,
9571 tr, &tracing_cpumask_fops);
9573 trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer,
9574 tr, &tracing_iter_fops);
9576 trace_create_file("trace", TRACE_MODE_WRITE, d_tracer,
9579 trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer,
9580 tr, &tracing_pipe_fops);
9582 trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer,
9583 tr, &tracing_entries_fops);
9585 trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer,
9586 tr, &tracing_total_entries_fops);
9588 trace_create_file("free_buffer", 0200, d_tracer,
9589 tr, &tracing_free_buffer_fops);
9591 trace_create_file("trace_marker", 0220, d_tracer,
9592 tr, &tracing_mark_fops);
9594 file = __find_event_file(tr, "ftrace", "print");
9595 if (file && file->dir)
9596 trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
9597 file, &event_trigger_fops);
9598 tr->trace_marker_file = file;
9600 trace_create_file("trace_marker_raw", 0220, d_tracer,
9601 tr, &tracing_mark_raw_fops);
9603 trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr,
9606 trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer,
9607 tr, &rb_simple_fops);
9609 trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr,
9610 &trace_time_stamp_mode_fops);
9612 tr->buffer_percent = 50;
9614 trace_create_file("buffer_percent", TRACE_MODE_READ, d_tracer,
9615 tr, &buffer_percent_fops);
9617 create_trace_options_dir(tr);
9619 #ifdef CONFIG_TRACER_MAX_TRACE
9620 trace_create_maxlat_file(tr, d_tracer);
9623 if (ftrace_create_function_files(tr, d_tracer))
9624 MEM_FAIL(1, "Could not allocate function filter files");
9626 #ifdef CONFIG_TRACER_SNAPSHOT
9627 trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer,
9628 tr, &snapshot_fops);
9631 trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer,
9632 tr, &tracing_err_log_fops);
9634 for_each_tracing_cpu(cpu)
9635 tracing_init_tracefs_percpu(tr, cpu);
9637 ftrace_init_tracefs(tr, d_tracer);
9640 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9642 struct vfsmount *mnt;
9643 struct file_system_type *type;
9646 * To maintain backward compatibility for tools that mount
9647 * debugfs to get to the tracing facility, tracefs is automatically
9648 * mounted to the debugfs/tracing directory.
9650 type = get_fs_type("tracefs");
9653 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9654 put_filesystem(type);
9663 * tracing_init_dentry - initialize top level trace array
9665 * This is called when creating files or directories in the tracing
9666 * directory. It is called via fs_initcall() by any of the boot up code
9667 * and returns 0 once the top level tracing directory has been set up.
9669 int tracing_init_dentry(void)
9671 struct trace_array *tr = &global_trace;
9673 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9674 pr_warn("Tracing disabled due to lockdown\n");
9678 /* The top level trace array uses NULL as parent */
9682 if (WARN_ON(!tracefs_initialized()))
9686 * As there may still be users that expect the tracing
9687 * files to exist in debugfs/tracing, we must automount
9688 * the tracefs file system there, so older tools still
9689 * work with the newer kernel.
9691 tr->dir = debugfs_create_automount("tracing", NULL,
9692 trace_automount, NULL);
9697 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9698 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9700 static struct workqueue_struct *eval_map_wq __initdata;
9701 static struct work_struct eval_map_work __initdata;
9702 static struct work_struct tracerfs_init_work __initdata;
9704 static void __init eval_map_work_func(struct work_struct *work)
9708 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9709 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9712 static int __init trace_eval_init(void)
9714 INIT_WORK(&eval_map_work, eval_map_work_func);
9716 eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0);
9718 pr_err("Unable to allocate eval_map_wq\n");
9720 eval_map_work_func(&eval_map_work);
9724 queue_work(eval_map_wq, &eval_map_work);
9728 subsys_initcall(trace_eval_init);
9730 static int __init trace_eval_sync(void)
9732 /* Make sure the eval map updates are finished */
9734 destroy_workqueue(eval_map_wq);
9738 late_initcall_sync(trace_eval_sync);
9741 #ifdef CONFIG_MODULES
9742 static void trace_module_add_evals(struct module *mod)
9744 if (!mod->num_trace_evals)
9748 * Modules with bad taint do not have events created, so do
9749 * not bother with their enums either.
9751 if (trace_module_has_bad_taint(mod))
9754 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9757 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9758 static void trace_module_remove_evals(struct module *mod)
9760 union trace_eval_map_item *map;
9761 union trace_eval_map_item **last = &trace_eval_maps;
9763 if (!mod->num_trace_evals)
9766 mutex_lock(&trace_eval_mutex);
9768 map = trace_eval_maps;
9771 if (map->head.mod == mod)
9773 map = trace_eval_jmp_to_tail(map);
9774 last = &map->tail.next;
9775 map = map->tail.next;
9780 *last = trace_eval_jmp_to_tail(map)->tail.next;
9783 mutex_unlock(&trace_eval_mutex);
9786 static inline void trace_module_remove_evals(struct module *mod) { }
9787 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9789 static int trace_module_notify(struct notifier_block *self,
9790 unsigned long val, void *data)
9792 struct module *mod = data;
9795 case MODULE_STATE_COMING:
9796 trace_module_add_evals(mod);
9798 case MODULE_STATE_GOING:
9799 trace_module_remove_evals(mod);
9806 static struct notifier_block trace_module_nb = {
9807 .notifier_call = trace_module_notify,
9810 #endif /* CONFIG_MODULES */
9812 static __init void tracer_init_tracefs_work_func(struct work_struct *work)
9817 init_tracer_tracefs(&global_trace, NULL);
9818 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9820 trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL,
9821 &global_trace, &tracing_thresh_fops);
9823 trace_create_file("README", TRACE_MODE_READ, NULL,
9824 NULL, &tracing_readme_fops);
9826 trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL,
9827 NULL, &tracing_saved_cmdlines_fops);
9829 trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL,
9830 NULL, &tracing_saved_cmdlines_size_fops);
9832 trace_create_file("saved_tgids", TRACE_MODE_READ, NULL,
9833 NULL, &tracing_saved_tgids_fops);
9835 trace_create_eval_file(NULL);
9837 #ifdef CONFIG_MODULES
9838 register_module_notifier(&trace_module_nb);
9841 #ifdef CONFIG_DYNAMIC_FTRACE
9842 trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL,
9843 NULL, &tracing_dyn_info_fops);
9846 create_trace_instances(NULL);
9848 update_tracer_options(&global_trace);
9851 static __init int tracer_init_tracefs(void)
9855 trace_access_lock_init();
9857 ret = tracing_init_dentry();
9862 INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func);
9863 queue_work(eval_map_wq, &tracerfs_init_work);
9865 tracer_init_tracefs_work_func(NULL);
9868 rv_init_interface();
9873 fs_initcall(tracer_init_tracefs);
9875 static int trace_die_panic_handler(struct notifier_block *self,
9876 unsigned long ev, void *unused);
9878 static struct notifier_block trace_panic_notifier = {
9879 .notifier_call = trace_die_panic_handler,
9880 .priority = INT_MAX - 1,
9883 static struct notifier_block trace_die_notifier = {
9884 .notifier_call = trace_die_panic_handler,
9885 .priority = INT_MAX - 1,
9889 * The idea is to execute the following die/panic callback early, in order
9890 * to avoid showing irrelevant information in the trace (like other panic
9891 * notifier functions); we are the 2nd to run, after hung_task/rcu_stall
9892 * warnings get disabled (to prevent potential log flooding).
9894 static int trace_die_panic_handler(struct notifier_block *self,
9895 unsigned long ev, void *unused)
9897 if (!ftrace_dump_on_oops)
9900 /* The die notifier requires DIE_OOPS to trigger */
9901 if (self == &trace_die_notifier && ev != DIE_OOPS)
9904 ftrace_dump(ftrace_dump_on_oops);
9910 * printk is set to a max of 1024; we really don't need it that big.
9911 * Nothing should be printing 1000 characters anyway.
9913 #define TRACE_MAX_PRINT 1000
9916 * Define here KERN_TRACE so that we have one place to modify
9917 * it if we decide to change what log level the ftrace dump
9920 #define KERN_TRACE KERN_EMERG
9923 trace_printk_seq(struct trace_seq *s)
9925 /* Probably should print a warning here. */
9926 if (s->seq.len >= TRACE_MAX_PRINT)
9927 s->seq.len = TRACE_MAX_PRINT;
9930 * More paranoid code. Although the buffer size is set to
9931 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9932 * an extra layer of protection.
9934 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9935 s->seq.len = s->seq.size - 1;
9937 /* Should be zero-terminated, but we are paranoid. */
9938 s->buffer[s->seq.len] = 0;
9940 printk(KERN_TRACE "%s", s->buffer);
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->array_buffer = &global_trace.array_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->array_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* Can not use kmalloc for iter.temp and iter.fmt */
	iter->temp = static_temp_buf;
	iter->temp_size = STATIC_TEMP_BUF_SIZE;
	iter->fmt = static_fmt_buf;
	iter->fmt_size = STATIC_FMT_BUF_SIZE;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
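
/*
 * Illustrative sketch (not part of the original file): since
 * ftrace_dump() is exported, a module can dump the trace buffers from
 * its own fatal-error path. DUMP_ALL dumps every CPU's buffer, DUMP_ORIG
 * only the calling CPU's. "example_fatal_error" is hypothetical;
 * compiled out.
 */
#if 0
static void example_fatal_error(void)
{
	/* preserve the trace of what led up to this point */
	ftrace_dump(DUMP_ALL);
	BUG();
}
#endif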
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(const char *))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = createfn(buf);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
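
/*
 * Illustrative sketch (not part of the original file): writers such as
 * the kprobe_events file route their write() through this helper so that
 * createfn() sees one newline-terminated, comment-stripped command at a
 * time. "example_create_cmd" and "example_write" are hypothetical;
 * compiled out.
 */
#if 0
static int example_create_cmd(const char *cmd)
{
	pr_info("parsed command: %s\n", cmd);
	return 0;
}

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	return trace_parse_run_command(file, buffer, count, ppos,
				       example_create_cmd);
}
#endif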
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (security_locked_down(LOCKDOWN_TRACEFS)) {
		pr_warn("Tracing disabled due to lockdown\n");
		return -EPERM;
	}

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callback allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	INIT_LIST_HEAD(&global_trace.err_log);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	test_can_verify();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
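
/*
 * Illustrative sketch (not part of the original file): the error path
 * above is the usual kernel "goto unwind ladder" - each failure jumps to
 * the label that releases only what was allocated before it, in reverse
 * order. All names below are hypothetical; compiled out.
 */
#if 0
static int example_setup(void)
{
	void *a, *b;
	int ret = -ENOMEM;

	a = example_alloc_a();
	if (!a)
		goto out;
	b = example_alloc_b();
	if (!b)
		goto out_free_a;

	return 0;

out_free_a:
	example_free_a(a);
out:
	return ret;
}
#endif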
void __init ftrace_boot_snapshot(void)
{
	if (snapshot_at_boot) {
		tracing_snapshot();
		internal_trace_puts("** Boot snapshot taken **\n");
	}
}

void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (MEM_FAIL(!tracepoint_print_iter,
			     "Failed to allocate trace iterator\n"))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();

	init_events();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static void clear_boot_tracer(void)
{
	/*
	 * The default tracer set at boot is in an init section.
	 * This function is called at late_initcall time. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;
}
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static void tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		if (security_locked_down(LOCKDOWN_TRACEFS)) {
			pr_warn("Can not set tracing clock due to lockdown\n");
			return;
		}

		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}
}
#else
static inline void tracing_set_default_clock(void) { }
#endif
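
/*
 * Note (not part of the original file): the same clock switch can be made
 * at runtime from user space, e.g.:
 *
 *	echo global > /sys/kernel/tracing/trace_clock
 *
 * or forced at boot with "trace_clock=global" on the kernel command line,
 * which sets trace_boot_clock and skips the fallback above.
 */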
__init static int late_trace_init(void)
{
	if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
		static_key_disable(&tracepoint_printk_key.key);
		tracepoint_printk = 0;
	}

	tracing_set_default_clock();
	clear_boot_tracer();
	return 0;
}

late_initcall_sync(late_trace_init);