1 // SPDX-License-Identifier: GPL-2.0
3 * ring buffer based function tracer
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/security.h>
21 #include <linux/seq_file.h>
22 #include <linux/notifier.h>
23 #include <linux/irqflags.h>
24 #include <linux/debugfs.h>
25 #include <linux/tracefs.h>
26 #include <linux/pagemap.h>
27 #include <linux/hardirq.h>
28 #include <linux/linkage.h>
29 #include <linux/uaccess.h>
30 #include <linux/vmalloc.h>
31 #include <linux/ftrace.h>
32 #include <linux/module.h>
33 #include <linux/percpu.h>
34 #include <linux/splice.h>
35 #include <linux/kdebug.h>
36 #include <linux/string.h>
37 #include <linux/mount.h>
38 #include <linux/rwsem.h>
39 #include <linux/slab.h>
40 #include <linux/ctype.h>
41 #include <linux/init.h>
42 #include <linux/poll.h>
43 #include <linux/nmi.h>
45 #include <linux/trace.h>
46 #include <linux/sched/clock.h>
47 #include <linux/sched/rt.h>
48 #include <linux/fsnotify.h>
49 #include <linux/irq_work.h>
50 #include <linux/workqueue.h>
53 #include "trace_output.h"
56 * On boot up, the ring buffer is set to the minimum size, so that
57 * we do not waste memory on systems that are not using tracing.
59 bool ring_buffer_expanded;
62 * We need to change this state when a selftest is running.
63 * A selftest will look into the ring-buffer to count the
64 * entries inserted during the selftest, although some concurrent
65 * insertions into the ring-buffer, such as trace_printk, could occur
66 * at the same time, giving false positive or negative results.
68 static bool __read_mostly tracing_selftest_running;
71 * If a tracer is running, we do not want to run SELFTEST.
73 bool __read_mostly tracing_selftest_disabled;
75 /* Pipe tracepoints to printk */
76 struct trace_iterator *tracepoint_print_iter;
77 int tracepoint_printk;
78 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
80 /* For tracers that don't implement custom flags */
81 static struct tracer_opt dummy_tracer_opt[] = {
86 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
92 * To prevent the comm cache from being overwritten when no
93 * tracing is active, only save the comm when a trace event occurred.
96 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
99 * Kill all tracing for good (never come back).
100 * It is initialized to 1 but will turn to zero if the initialization
101 * of the tracer is successful. But that is the only place that sets this back to zero.
104 static int tracing_disabled = 1;
106 cpumask_var_t __read_mostly tracing_buffer_mask;
109 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
111 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
112 * is set, then ftrace_dump is called. This will output the contents
113 * of the ftrace buffers to the console. This is very useful for
114 * capturing traces that lead to crashes and outputting them to a
117 * serial console. It is off by default, but you can enable it either by
118 * specifying "ftrace_dump_on_oops" on the kernel command line, or by
119 * setting /proc/sys/kernel/ftrace_dump_on_oops.
120 * Set it to 1 to dump the buffers of all CPUs, or
121 * set it to 2 to dump only the buffer of the CPU that triggered the oops.
124 enum ftrace_dump_mode ftrace_dump_on_oops;
126 /* When set, tracing will stop when a WARN*() is hit */
127 int __disable_trace_on_warning;
129 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
130 /* Map of enums to their values, for "eval_map" file */
131 struct trace_eval_map_head {
133 unsigned long length;
136 union trace_eval_map_item;
138 struct trace_eval_map_tail {
140 * "end" is first and points to NULL as it must be different
141 * than "mod" or "eval_string"
143 union trace_eval_map_item *next;
144 const char *end; /* points to NULL */
147 static DEFINE_MUTEX(trace_eval_mutex);
150 * The trace_eval_maps are saved in an array with two extra elements,
151 * one at the beginning, and one at the end. The beginning item contains
152 * the count of the saved maps (head.length), and the module they
153 * belong to if not built in (head.mod). The ending item contains a
154 * pointer to the next array of saved eval_map items.
156 union trace_eval_map_item {
157 struct trace_eval_map map;
158 struct trace_eval_map_head head;
159 struct trace_eval_map_tail tail;
162 static union trace_eval_map_item *trace_eval_maps;
163 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
165 int tracing_set_tracer(struct trace_array *tr, const char *buf);
166 static void ftrace_trace_userstack(struct trace_array *tr,
167 struct trace_buffer *buffer,
168 unsigned long flags, int pc);
170 #define MAX_TRACER_SIZE 100
171 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
172 static char *default_bootup_tracer;
174 static bool allocate_snapshot;
176 static int __init set_cmdline_ftrace(char *str)
178 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
179 default_bootup_tracer = bootup_tracer_buf;
180 /* We are using ftrace early, expand it */
181 ring_buffer_expanded = true;
184 __setup("ftrace=", set_cmdline_ftrace);
186 static int __init set_ftrace_dump_on_oops(char *str)
188 if (*str++ != '=' || !*str) {
189 ftrace_dump_on_oops = DUMP_ALL;
193 if (!strcmp("orig_cpu", str)) {
194 ftrace_dump_on_oops = DUMP_ORIG;
200 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
202 static int __init stop_trace_on_warning(char *str)
204 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
205 __disable_trace_on_warning = 1;
208 __setup("traceoff_on_warning", stop_trace_on_warning);
210 static int __init boot_alloc_snapshot(char *str)
212 allocate_snapshot = true;
213 /* We also need the main ring buffer expanded */
214 ring_buffer_expanded = true;
217 __setup("alloc_snapshot", boot_alloc_snapshot);
220 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
222 static int __init set_trace_boot_options(char *str)
224 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
227 __setup("trace_options=", set_trace_boot_options);
229 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
230 static char *trace_boot_clock __initdata;
232 static int __init set_trace_boot_clock(char *str)
234 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
235 trace_boot_clock = trace_boot_clock_buf;
238 __setup("trace_clock=", set_trace_boot_clock);
240 static int __init set_tracepoint_printk(char *str)
242 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
243 tracepoint_printk = 1;
246 __setup("tp_printk", set_tracepoint_printk);
248 unsigned long long ns2usecs(u64 nsec)
256 trace_process_export(struct trace_export *export,
257 struct ring_buffer_event *event, int flag)
259 struct trace_entry *entry;
260 unsigned int size = 0;
262 if (export->flags & flag) {
263 entry = ring_buffer_event_data(event);
264 size = ring_buffer_event_length(event);
265 export->write(export, entry, size);
269 static DEFINE_MUTEX(ftrace_export_lock);
271 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
273 static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
274 static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
275 static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled);
277 static inline void ftrace_exports_enable(struct trace_export *export)
279 if (export->flags & TRACE_EXPORT_FUNCTION)
280 static_branch_inc(&trace_function_exports_enabled);
282 if (export->flags & TRACE_EXPORT_EVENT)
283 static_branch_inc(&trace_event_exports_enabled);
285 if (export->flags & TRACE_EXPORT_MARKER)
286 static_branch_inc(&trace_marker_exports_enabled);
289 static inline void ftrace_exports_disable(struct trace_export *export)
291 if (export->flags & TRACE_EXPORT_FUNCTION)
292 static_branch_dec(&trace_function_exports_enabled);
294 if (export->flags & TRACE_EXPORT_EVENT)
295 static_branch_dec(&trace_event_exports_enabled);
297 if (export->flags & TRACE_EXPORT_MARKER)
298 static_branch_dec(&trace_marker_exports_enabled);
301 static void ftrace_exports(struct ring_buffer_event *event, int flag)
303 struct trace_export *export;
305 preempt_disable_notrace();
307 export = rcu_dereference_raw_check(ftrace_exports_list);
309 trace_process_export(export, event, flag);
310 export = rcu_dereference_raw_check(export->next);
313 preempt_enable_notrace();
317 add_trace_export(struct trace_export **list, struct trace_export *export)
319 rcu_assign_pointer(export->next, *list);
321 * We are entering export into the list but another
322 * CPU might be walking that list. We need to make sure
323 * the export->next pointer is valid before another CPU sees
324 * the export pointer included into the list.
326 rcu_assign_pointer(*list, export);
330 rm_trace_export(struct trace_export **list, struct trace_export *export)
332 struct trace_export **p;
334 for (p = list; *p != NULL; p = &(*p)->next)
341 rcu_assign_pointer(*p, (*p)->next);
347 add_ftrace_export(struct trace_export **list, struct trace_export *export)
349 ftrace_exports_enable(export);
351 add_trace_export(list, export);
355 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
359 ret = rm_trace_export(list, export);
360 ftrace_exports_disable(export);
365 int register_ftrace_export(struct trace_export *export)
367 if (WARN_ON_ONCE(!export->write))
370 mutex_lock(&ftrace_export_lock);
372 add_ftrace_export(&ftrace_exports_list, export);
374 mutex_unlock(&ftrace_export_lock);
378 EXPORT_SYMBOL_GPL(register_ftrace_export);
380 int unregister_ftrace_export(struct trace_export *export)
384 mutex_lock(&ftrace_export_lock);
386 ret = rm_ftrace_export(&ftrace_exports_list, export);
388 mutex_unlock(&ftrace_export_lock);
392 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
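/*
 * Minimal sketch of a trace_export user (illustrative only, not part of
 * this file). A module mirrors trace data to another transport by
 * registering a write() callback; "my_export" and "my_write" are
 * hypothetical names.
 */
#if 0	/* example only, not built */
static void my_write(struct trace_export *export, const void *entry,
		     unsigned int size)
{
	/*
	 * Called for every exported entry while tracing is active; runs in
	 * tracing context and must not sleep. Copy @size bytes from @entry
	 * to wherever the data should go.
	 */
}

static struct trace_export my_export = {
	.write	= my_write,
	.flags	= TRACE_EXPORT_EVENT | TRACE_EXPORT_MARKER,
};

static int __init my_export_init(void)
{
	return register_ftrace_export(&my_export);
}

static void __exit my_export_exit(void)
{
	unregister_ftrace_export(&my_export);
}
#endif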
394 /* trace_flags holds trace_options default values */
395 #define TRACE_DEFAULT_FLAGS \
396 (FUNCTION_DEFAULT_FLAGS | \
397 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
398 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
399 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
400 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
402 /* trace_options that are only supported by global_trace */
403 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
404 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
406 /* trace_flags that are default zero for instances */
407 #define ZEROED_TRACE_FLAGS \
408 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
411 * The global_trace is the descriptor that holds the top-level tracing
412 * buffers for the live tracing.
414 static struct trace_array global_trace = {
415 .trace_flags = TRACE_DEFAULT_FLAGS,
418 LIST_HEAD(ftrace_trace_arrays);
420 int trace_array_get(struct trace_array *this_tr)
422 struct trace_array *tr;
425 mutex_lock(&trace_types_lock);
426 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
433 mutex_unlock(&trace_types_lock);
438 static void __trace_array_put(struct trace_array *this_tr)
440 WARN_ON(!this_tr->ref);
445 * trace_array_put - Decrement the reference counter for this trace array.
447 * NOTE: Use this when we no longer need the trace array returned by
448 * trace_array_get_by_name(). This ensures the trace array can be later
452 void trace_array_put(struct trace_array *this_tr)
457 mutex_lock(&trace_types_lock);
458 __trace_array_put(this_tr);
459 mutex_unlock(&trace_types_lock);
461 EXPORT_SYMBOL_GPL(trace_array_put);
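/*
 * Minimal sketch (illustrative only, not part of this file) of the
 * reference pairing described above: a module creates or looks up a
 * tracing instance with trace_array_get_by_name() and later drops the
 * reference with trace_array_put(). Names are hypothetical.
 */
#if 0	/* example only, not built */
static struct trace_array *my_tr;

static int __init my_instance_init(void)
{
	my_tr = trace_array_get_by_name("my_instance");
	if (!my_tr)
		return -ENOMEM;
	return 0;
}

static void __exit my_instance_exit(void)
{
	/* Drop the reference taken by trace_array_get_by_name(). */
	trace_array_put(my_tr);
}
#endif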
463 int tracing_check_open_get_tr(struct trace_array *tr)
467 ret = security_locked_down(LOCKDOWN_TRACEFS);
471 if (tracing_disabled)
474 if (tr && trace_array_get(tr) < 0)
480 int call_filter_check_discard(struct trace_event_call *call, void *rec,
481 struct trace_buffer *buffer,
482 struct ring_buffer_event *event)
484 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
485 !filter_match_preds(call->filter, rec)) {
486 __trace_event_discard_commit(buffer, event);
493 void trace_free_pid_list(struct trace_pid_list *pid_list)
495 vfree(pid_list->pids);
500 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
501 * @filtered_pids: The list of pids to check
502 * @search_pid: The PID to find in @filtered_pids
504 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
507 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
510 * If pid_max changed after filtered_pids was created, we
511 * by default ignore all pids greater than the previous pid_max.
513 if (search_pid >= filtered_pids->pid_max)
516 return test_bit(search_pid, filtered_pids->pids);
520 * trace_ignore_this_task - should a task be ignored for tracing
521 * @filtered_pids: The list of pids to check
522 * @task: The task that should be ignored if not filtered
524 * Checks if @task should be traced or not from @filtered_pids.
525 * Returns true if @task should *NOT* be traced.
526 * Returns false if @task should be traced.
529 trace_ignore_this_task(struct trace_pid_list *filtered_pids,
530 struct trace_pid_list *filtered_no_pids,
531 struct task_struct *task)
534 * If filtered_no_pids is not empty, and the task's pid is listed
535 * in filtered_no_pids, then return true.
536 * Otherwise, if filtered_pids is empty, that means we can
537 * trace all tasks. If it has content, then only trace pids
538 * within filtered_pids.
541 return (filtered_pids &&
542 !trace_find_filtered_pid(filtered_pids, task->pid)) ||
544 trace_find_filtered_pid(filtered_no_pids, task->pid));
548 * trace_filter_add_remove_task - Add or remove a task from a pid_list
549 * @pid_list: The list to modify
550 * @self: The current task for fork or NULL for exit
551 * @task: The task to add or remove
553 * When adding a task, if @self is defined, the task is only added if @self
554 * is also included in @pid_list. This happens on fork, where tasks should
555 * only be added when the parent is listed. If @self is NULL, then the
556 * @task pid will be removed from the list, which would happen on exit of a task.
559 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
560 struct task_struct *self,
561 struct task_struct *task)
566 /* For forks, we only add if the forking task is listed */
568 if (!trace_find_filtered_pid(pid_list, self->pid))
572 /* Sorry, but we don't support pid_max changing after setting */
573 if (task->pid >= pid_list->pid_max)
576 /* "self" is set for forks, and NULL for exits */
578 set_bit(task->pid, pid_list->pids);
580 clear_bit(task->pid, pid_list->pids);
584 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
585 * @pid_list: The pid list to show
586 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
587 * @pos: The position of the file
589 * This is used by the seq_file "next" operation to iterate the pids
590 * listed in a trace_pid_list structure.
592 * Returns the pid+1 as we want to display pid of zero, but NULL would
593 * stop the iteration.
595 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
597 unsigned long pid = (unsigned long)v;
601 /* pid already is +1 of the actual previous bit */
602 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
604 /* Return pid + 1 to allow zero to be represented */
605 if (pid < pid_list->pid_max)
606 return (void *)(pid + 1);
612 * trace_pid_start - Used for seq_file to start reading pid lists
613 * @pid_list: The pid list to show
614 * @pos: The position of the file
616 * This is used by seq_file "start" operation to start the iteration
619 * Returns the pid+1 as we want to display pid of zero, but NULL would
620 * stop the iteration.
622 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
627 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
628 if (pid >= pid_list->pid_max)
631 /* Return pid + 1 so that zero can be the exit value */
632 for (pid++; pid && l < *pos;
633 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
639 * trace_pid_show - show the current pid in seq_file processing
640 * @m: The seq_file structure to write into
641 * @v: A void pointer of the pid (+1) value to display
643 * Can be directly used by seq_file operations to display the current
646 int trace_pid_show(struct seq_file *m, void *v)
648 unsigned long pid = (unsigned long)v - 1;
650 seq_printf(m, "%lu\n", pid);
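/*
 * Illustrative sketch (not part of this file) of how trace_pid_start(),
 * trace_pid_next() and trace_pid_show() are typically wired into seq_file
 * operations for a "set_*_pid" style file. "my_pid_list" is a hypothetical
 * pointer; real users also take the proper RCU/locking in start/stop.
 */
#if 0	/* example only, not built */
static struct trace_pid_list *my_pid_list;

static void *my_pids_start(struct seq_file *m, loff_t *pos)
{
	if (!my_pid_list)
		return NULL;
	return trace_pid_start(my_pid_list, pos);
}

static void *my_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(my_pid_list, v, pos);
}

static void my_pids_stop(struct seq_file *m, void *v)
{
}

static const struct seq_operations my_pids_seq_ops = {
	.start	= my_pids_start,
	.next	= my_pids_next,
	.stop	= my_pids_stop,
	.show	= trace_pid_show,
};
#endif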
654 /* 128 should be much more than enough */
655 #define PID_BUF_SIZE 127
657 int trace_pid_write(struct trace_pid_list *filtered_pids,
658 struct trace_pid_list **new_pid_list,
659 const char __user *ubuf, size_t cnt)
661 struct trace_pid_list *pid_list;
662 struct trace_parser parser;
670 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
674 * Always recreate a new array. The write is an all or nothing
675 * operation. Always create a new array when adding new pids by
676 * the user. If the operation fails, then the current list is left untouched.
679 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
681 trace_parser_put(&parser);
685 pid_list->pid_max = READ_ONCE(pid_max);
687 /* Only truncating will shrink pid_max */
688 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
689 pid_list->pid_max = filtered_pids->pid_max;
691 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
692 if (!pid_list->pids) {
693 trace_parser_put(&parser);
699 /* copy the current bits to the new max */
700 for_each_set_bit(pid, filtered_pids->pids,
701 filtered_pids->pid_max) {
702 set_bit(pid, pid_list->pids);
711 ret = trace_get_user(&parser, ubuf, cnt, &pos);
712 if (ret < 0 || !trace_parser_loaded(&parser))
720 if (kstrtoul(parser.buffer, 0, &val))
722 if (val >= pid_list->pid_max)
727 set_bit(pid, pid_list->pids);
730 trace_parser_clear(&parser);
733 trace_parser_put(&parser);
736 trace_free_pid_list(pid_list);
741 /* Cleared the list of pids */
742 trace_free_pid_list(pid_list);
747 *new_pid_list = pid_list;
752 static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu)
756 /* Early boot up does not have a buffer yet */
758 return trace_clock_local();
760 ts = ring_buffer_time_stamp(buf->buffer, cpu);
761 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
766 u64 ftrace_now(int cpu)
768 return buffer_ftrace_now(&global_trace.array_buffer, cpu);
772 * tracing_is_enabled - Show if global_trace has been disabled
774 * Shows if the global trace has been enabled or not. It uses the
775 * mirror flag "buffer_disabled" to be used in fast paths such as for
776 * the irqsoff tracer. But it may be inaccurate due to races. If you
777 * need to know the accurate state, use tracing_is_on() which is a little
778 * slower, but accurate.
780 int tracing_is_enabled(void)
783 * For quick access (irqsoff uses this in fast path), just
784 * return the mirror variable of the state of the ring buffer.
785 * It's a little racy, but we don't really care.
788 return !global_trace.buffer_disabled;
792 * trace_buf_size is the size in bytes that is allocated
793 * for a buffer. Note, the number of bytes is always rounded to page size.
796 * This number is purposely set to a low number of 16384.
797 * If the dump on oops happens, it will be much appreciated
798 * to not have to wait for all that output. Anyway, this can be
799 * configured at both boot time and run time.
801 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
803 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
805 /* trace_types holds a link list of available tracers. */
806 static struct tracer *trace_types __read_mostly;
809 * trace_types_lock is used to protect the trace_types list.
811 DEFINE_MUTEX(trace_types_lock);
814 * serialize access to the ring buffer
816 * The ring buffer serializes readers, but that is only low level protection.
817 * The validity of events (returned by ring_buffer_peek() etc.)
818 * is not protected by the ring buffer.
820 * The content of events may become garbage if we allow other processes to
821 * consume these events concurrently:
822 * A) the page of the consumed events may become a normal page
823 * (not a reader page) in the ring buffer, and this page will be rewritten
824 * by the events producer.
825 * B) The page of the consumed events may become a page for splice_read,
826 * and this page will be returned to the system.
828 * These primitives allow multiple processes to access different per-cpu ring buffers.
831 * These primitives don't distinguish read-only and read-consume access.
832 * Multiple read-only accesses are also serialized.
836 static DECLARE_RWSEM(all_cpu_access_lock);
837 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
839 static inline void trace_access_lock(int cpu)
841 if (cpu == RING_BUFFER_ALL_CPUS) {
842 /* gain it for accessing the whole ring buffer. */
843 down_write(&all_cpu_access_lock);
845 /* gain it for accessing a cpu ring buffer. */
847 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
848 down_read(&all_cpu_access_lock);
850 /* Secondly block other access to this @cpu ring buffer. */
851 mutex_lock(&per_cpu(cpu_access_lock, cpu));
855 static inline void trace_access_unlock(int cpu)
857 if (cpu == RING_BUFFER_ALL_CPUS) {
858 up_write(&all_cpu_access_lock);
860 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
861 up_read(&all_cpu_access_lock);
865 static inline void trace_access_lock_init(void)
869 for_each_possible_cpu(cpu)
870 mutex_init(&per_cpu(cpu_access_lock, cpu));
875 static DEFINE_MUTEX(access_lock);
877 static inline void trace_access_lock(int cpu)
880 mutex_lock(&access_lock);
883 static inline void trace_access_unlock(int cpu)
886 mutex_unlock(&access_lock);
889 static inline void trace_access_lock_init(void)
895 #ifdef CONFIG_STACKTRACE
896 static void __ftrace_trace_stack(struct trace_buffer *buffer,
898 int skip, int pc, struct pt_regs *regs);
899 static inline void ftrace_trace_stack(struct trace_array *tr,
900 struct trace_buffer *buffer,
902 int skip, int pc, struct pt_regs *regs);
905 static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
907 int skip, int pc, struct pt_regs *regs)
910 static inline void ftrace_trace_stack(struct trace_array *tr,
911 struct trace_buffer *buffer,
913 int skip, int pc, struct pt_regs *regs)
919 static __always_inline void
920 trace_event_setup(struct ring_buffer_event *event,
921 int type, unsigned long flags, int pc)
923 struct trace_entry *ent = ring_buffer_event_data(event);
925 tracing_generic_entry_update(ent, type, flags, pc);
928 static __always_inline struct ring_buffer_event *
929 __trace_buffer_lock_reserve(struct trace_buffer *buffer,
932 unsigned long flags, int pc)
934 struct ring_buffer_event *event;
936 event = ring_buffer_lock_reserve(buffer, len);
938 trace_event_setup(event, type, flags, pc);
943 void tracer_tracing_on(struct trace_array *tr)
945 if (tr->array_buffer.buffer)
946 ring_buffer_record_on(tr->array_buffer.buffer);
948 * This flag is looked at when buffers haven't been allocated
949 * yet, or by some tracers (like irqsoff), that just want to
950 * know if the ring buffer has been disabled, but it can handle
951 * races where it gets disabled but we still do a record.
952 * As the check is in the fast path of the tracers, it is more
953 * important to be fast than accurate.
955 tr->buffer_disabled = 0;
956 /* Make the flag seen by readers */
961 * tracing_on - enable tracing buffers
963 * This function enables tracing buffers that may have been
964 * disabled with tracing_off.
966 void tracing_on(void)
968 tracer_tracing_on(&global_trace);
970 EXPORT_SYMBOL_GPL(tracing_on);
973 static __always_inline void
974 __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
976 __this_cpu_write(trace_taskinfo_save, true);
978 /* If this is the temp buffer, we need to commit fully */
979 if (this_cpu_read(trace_buffered_event) == event) {
980 /* Length is in event->array[0] */
981 ring_buffer_write(buffer, event->array[0], &event->array[1]);
982 /* Release the temp buffer */
983 this_cpu_dec(trace_buffered_event_cnt);
985 ring_buffer_unlock_commit(buffer, event);
989 * __trace_puts - write a constant string into the trace buffer.
990 * @ip: The address of the caller
991 * @str: The constant string to write
992 * @size: The size of the string.
994 int __trace_puts(unsigned long ip, const char *str, int size)
996 struct ring_buffer_event *event;
997 struct trace_buffer *buffer;
998 struct print_entry *entry;
999 unsigned long irq_flags;
1003 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1006 pc = preempt_count();
1008 if (unlikely(tracing_selftest_running || tracing_disabled))
1011 alloc = sizeof(*entry) + size + 2; /* possible \n added */
1013 local_save_flags(irq_flags);
1014 buffer = global_trace.array_buffer.buffer;
1015 ring_buffer_nest_start(buffer);
1016 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
1023 entry = ring_buffer_event_data(event);
1026 memcpy(&entry->buf, str, size);
1028 /* Add a newline if necessary */
1029 if (entry->buf[size - 1] != '\n') {
1030 entry->buf[size] = '\n';
1031 entry->buf[size + 1] = '\0';
1033 entry->buf[size] = '\0';
1035 __buffer_unlock_commit(buffer, event);
1036 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1038 ring_buffer_nest_end(buffer);
1041 EXPORT_SYMBOL_GPL(__trace_puts);
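/*
 * __trace_puts() is normally reached through the trace_puts() macro from
 * <linux/kernel.h>, which picks __trace_puts() or __trace_bputs() depending
 * on whether the string is a build-time constant. A minimal usage sketch
 * (illustrative only, hypothetical function name):
 */
#if 0	/* example only, not built */
static void my_fast_path(void)
{
	/* One cheap marker line in the trace buffer, no printf formatting. */
	trace_puts("my_fast_path: entered\n");
}
#endif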
1044 * __trace_bputs - write the pointer to a constant string into trace buffer
1045 * @ip: The address of the caller
1046 * @str: The constant string to write to the buffer to
1048 int __trace_bputs(unsigned long ip, const char *str)
1050 struct ring_buffer_event *event;
1051 struct trace_buffer *buffer;
1052 struct bputs_entry *entry;
1053 unsigned long irq_flags;
1054 int size = sizeof(struct bputs_entry);
1058 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
1061 pc = preempt_count();
1063 if (unlikely(tracing_selftest_running || tracing_disabled))
1066 local_save_flags(irq_flags);
1067 buffer = global_trace.array_buffer.buffer;
1069 ring_buffer_nest_start(buffer);
1070 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
1075 entry = ring_buffer_event_data(event);
1079 __buffer_unlock_commit(buffer, event);
1080 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
1084 ring_buffer_nest_end(buffer);
1087 EXPORT_SYMBOL_GPL(__trace_bputs);
1089 #ifdef CONFIG_TRACER_SNAPSHOT
1090 static void tracing_snapshot_instance_cond(struct trace_array *tr,
1093 struct tracer *tracer = tr->current_trace;
1094 unsigned long flags;
1097 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
1098 internal_trace_puts("*** snapshot is being ignored ***\n");
1102 if (!tr->allocated_snapshot) {
1103 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
1104 internal_trace_puts("*** stopping trace here! ***\n");
1109 /* Note, snapshot can not be used when the tracer uses it */
1110 if (tracer->use_max_tr) {
1111 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
1112 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
1116 local_irq_save(flags);
1117 update_max_tr(tr, current, smp_processor_id(), cond_data);
1118 local_irq_restore(flags);
1121 void tracing_snapshot_instance(struct trace_array *tr)
1123 tracing_snapshot_instance_cond(tr, NULL);
1127 * tracing_snapshot - take a snapshot of the current buffer.
1129 * This causes a swap between the snapshot buffer and the current live
1130 * tracing buffer. You can use this to take snapshots of the live
1131 * trace when some condition is triggered, but continue to trace.
1133 * Note, make sure to allocate the snapshot either with
1134 * tracing_snapshot_alloc(), or manually
1135 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
1137 * If the snapshot buffer is not allocated, it will stop tracing.
1138 * Basically making a permanent snapshot.
1140 void tracing_snapshot(void)
1142 struct trace_array *tr = &global_trace;
1144 tracing_snapshot_instance(tr);
1146 EXPORT_SYMBOL_GPL(tracing_snapshot);
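/*
 * Illustrative sketch (not part of this file): allocate the snapshot
 * buffer once where sleeping is allowed, then freeze the interesting
 * trace data with tracing_snapshot() when a rare condition is hit.
 * Names are hypothetical.
 */
#if 0	/* example only, not built */
static int __init my_debug_init(void)
{
	/* May sleep; do this at init time, not from the hot path. */
	return tracing_alloc_snapshot();
}

static void my_hot_path(int error)
{
	if (unlikely(error)) {
		/* Safe from atomic context; swaps live and snapshot buffers. */
		tracing_snapshot();
	}
}
#endif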
1149 * tracing_snapshot_cond - conditionally take a snapshot of the current buffer.
1150 * @tr: The tracing instance to snapshot
1151 * @cond_data: The data to be tested conditionally, and possibly saved
1153 * This is the same as tracing_snapshot() except that the snapshot is
1154 * conditional - the snapshot will only happen if the
1155 * cond_snapshot.update() implementation receiving the cond_data
1156 * returns true, which means that the trace array's cond_snapshot
1157 * update() operation used the cond_data to determine whether the
1158 * snapshot should be taken, and if it was, presumably saved it along
1159 * with the snapshot.
1161 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1163 tracing_snapshot_instance_cond(tr, cond_data);
1165 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1168 * tracing_snapshot_cond_data - get the user data associated with a snapshot
1169 * @tr: The tracing instance
1171 * When the user enables a conditional snapshot using
1172 * tracing_snapshot_cond_enable(), the user-defined cond_data is saved
1173 * with the snapshot. This accessor is used to retrieve it.
1175 * Should not be called from cond_snapshot.update(), since it takes
1176 * the tr->max_lock lock, which the code calling
1177 * cond_snapshot.update() has already done.
1179 * Returns the cond_data associated with the trace array's snapshot.
1181 void *tracing_cond_snapshot_data(struct trace_array *tr)
1183 void *cond_data = NULL;
1185 arch_spin_lock(&tr->max_lock);
1187 if (tr->cond_snapshot)
1188 cond_data = tr->cond_snapshot->cond_data;
1190 arch_spin_unlock(&tr->max_lock);
1194 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1196 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
1197 struct array_buffer *size_buf, int cpu_id);
1198 static void set_buffer_entries(struct array_buffer *buf, unsigned long val);
1200 int tracing_alloc_snapshot_instance(struct trace_array *tr)
1204 if (!tr->allocated_snapshot) {
1206 /* allocate spare buffer */
1207 ret = resize_buffer_duplicate_size(&tr->max_buffer,
1208 &tr->array_buffer, RING_BUFFER_ALL_CPUS);
1212 tr->allocated_snapshot = true;
1218 static void free_snapshot(struct trace_array *tr)
1221 * We don't free the ring buffer; instead, we resize it because
1222 * the max_tr ring buffer has some state (e.g. ring->clock) and
1223 * we want to preserve it.
1225 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
1226 set_buffer_entries(&tr->max_buffer, 1);
1227 tracing_reset_online_cpus(&tr->max_buffer);
1228 tr->allocated_snapshot = false;
1232 * tracing_alloc_snapshot - allocate snapshot buffer.
1234 * This only allocates the snapshot buffer if it isn't already
1235 * allocated - it doesn't also take a snapshot.
1237 * This is meant to be used in cases where the snapshot buffer needs
1238 * to be set up for events that can't sleep but need to be able to
1239 * trigger a snapshot.
1241 int tracing_alloc_snapshot(void)
1243 struct trace_array *tr = &global_trace;
1246 ret = tracing_alloc_snapshot_instance(tr);
1251 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1254 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1256 * This is similar to tracing_snapshot(), but it will allocate the
1257 * snapshot buffer if it isn't already allocated. Use this only
1258 * where it is safe to sleep, as the allocation may sleep.
1260 * This causes a swap between the snapshot buffer and the current live
1261 * tracing buffer. You can use this to take snapshots of the live
1262 * trace when some condition is triggered, but continue to trace.
1264 void tracing_snapshot_alloc(void)
1268 ret = tracing_alloc_snapshot();
1274 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1277 * tracing_snapshot_cond_enable - enable conditional snapshot for an instance
1278 * @tr: The tracing instance
1279 * @cond_data: User data to associate with the snapshot
1280 * @update: Implementation of the cond_snapshot update function
1282 * Check whether the conditional snapshot for the given instance has
1283 * already been enabled, or if the current tracer is already using a
1284 * snapshot; if so, return -EBUSY, else create a cond_snapshot and
1285 * save the cond_data and update function inside.
1287 * Returns 0 if successful, error otherwise.
1289 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
1290 cond_update_fn_t update)
1292 struct cond_snapshot *cond_snapshot;
1295 cond_snapshot = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL);
1299 cond_snapshot->cond_data = cond_data;
1300 cond_snapshot->update = update;
1302 mutex_lock(&trace_types_lock);
1304 ret = tracing_alloc_snapshot_instance(tr);
1308 if (tr->current_trace->use_max_tr) {
1314 * The cond_snapshot can only change to NULL without the
1315 * trace_types_lock. We don't care if we race with it going
1316 * to NULL, but we want to make sure that it's not set to
1317 * something other than NULL when we get here, which we can
1318 * do safely with only holding the trace_types_lock and not
1319 * having to take the max_lock.
1321 if (tr->cond_snapshot) {
1326 arch_spin_lock(&tr->max_lock);
1327 tr->cond_snapshot = cond_snapshot;
1328 arch_spin_unlock(&tr->max_lock);
1330 mutex_unlock(&trace_types_lock);
1335 mutex_unlock(&trace_types_lock);
1336 kfree(cond_snapshot);
1339 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
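/*
 * Illustrative sketch (not part of this file) of a conditional-snapshot
 * user: the update() callback decides per call whether the snapshot should
 * be taken and can stash data for later retrieval through
 * tracing_cond_snapshot_data(). All names are hypothetical.
 */
#if 0	/* example only, not built */
struct my_cond_data {
	unsigned long	threshold;
	unsigned long	last_value;
	unsigned long	worst_seen;
};

static struct my_cond_data my_data = { .threshold = 1000 };

static bool my_update(struct trace_array *tr, void *cond_data)
{
	struct my_cond_data *d = cond_data;

	if (d->last_value <= d->threshold)
		return false;		/* no snapshot this time */

	d->worst_seen = d->last_value;	/* saved alongside the snapshot */
	return true;
}

static int my_enable(struct trace_array *tr)
{
	return tracing_snapshot_cond_enable(tr, &my_data, my_update);
}

static void my_hit(struct trace_array *tr, unsigned long value)
{
	my_data.last_value = value;
	tracing_snapshot_cond(tr, &my_data);
}
#endif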
1342 * tracing_snapshot_cond_disable - disable conditional snapshot for an instance
1343 * @tr: The tracing instance
1345 * Check whether the conditional snapshot for the given instance is
1346 * enabled; if so, free the cond_snapshot associated with it,
1347 * otherwise return -EINVAL.
1349 * Returns 0 if successful, error otherwise.
1351 int tracing_snapshot_cond_disable(struct trace_array *tr)
1355 arch_spin_lock(&tr->max_lock);
1357 if (!tr->cond_snapshot)
1360 kfree(tr->cond_snapshot);
1361 tr->cond_snapshot = NULL;
1364 arch_spin_unlock(&tr->max_lock);
1368 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1370 void tracing_snapshot(void)
1372 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1374 EXPORT_SYMBOL_GPL(tracing_snapshot);
1375 void tracing_snapshot_cond(struct trace_array *tr, void *cond_data)
1377 WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used");
1379 EXPORT_SYMBOL_GPL(tracing_snapshot_cond);
1380 int tracing_alloc_snapshot(void)
1382 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1385 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1386 void tracing_snapshot_alloc(void)
1391 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1392 void *tracing_cond_snapshot_data(struct trace_array *tr)
1396 EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data);
1397 int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update)
1401 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable);
1402 int tracing_snapshot_cond_disable(struct trace_array *tr)
1406 EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable);
1407 #endif /* CONFIG_TRACER_SNAPSHOT */
1409 void tracer_tracing_off(struct trace_array *tr)
1411 if (tr->array_buffer.buffer)
1412 ring_buffer_record_off(tr->array_buffer.buffer);
1414 * This flag is looked at when buffers haven't been allocated
1415 * yet, or by some tracers (like irqsoff), that just want to
1416 * know if the ring buffer has been disabled, but it can handle
1417 * races where it gets disabled but we still do a record.
1418 * As the check is in the fast path of the tracers, it is more
1419 * important to be fast than accurate.
1421 tr->buffer_disabled = 1;
1422 /* Make the flag seen by readers */
1427 * tracing_off - turn off tracing buffers
1429 * This function stops the tracing buffers from recording data.
1430 * It does not disable any overhead the tracers themselves may
1431 * be causing. This function simply causes all recording to
1432 * the ring buffers to fail.
1434 void tracing_off(void)
1436 tracer_tracing_off(&global_trace);
1438 EXPORT_SYMBOL_GPL(tracing_off);
1440 void disable_trace_on_warning(void)
1442 if (__disable_trace_on_warning) {
1443 trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_,
1444 "Disabling tracing due to warning\n");
1450 * tracer_tracing_is_on - show real state of ring buffer enabled
1451 * @tr: the trace array to know if ring buffer is enabled
1453 * Shows real state of the ring buffer if it is enabled or not.
1455 bool tracer_tracing_is_on(struct trace_array *tr)
1457 if (tr->array_buffer.buffer)
1458 return ring_buffer_record_is_on(tr->array_buffer.buffer);
1459 return !tr->buffer_disabled;
1463 * tracing_is_on - show state of ring buffers enabled
1465 int tracing_is_on(void)
1467 return tracer_tracing_is_on(&global_trace);
1469 EXPORT_SYMBOL_GPL(tracing_is_on);
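/*
 * Illustrative sketch (not part of this file): tracing_off() is often
 * called from a debugging check so that the ring buffer stops exactly at
 * the point of interest and can be inspected afterwards via tracefs.
 * The function name is hypothetical.
 */
#if 0	/* example only, not built */
static void my_check(bool corrupted)
{
	if (WARN_ON_ONCE(corrupted)) {
		/* Freeze the ring buffer; re-enable later with tracing_on(). */
		tracing_off();
	}
}
#endif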
1471 static int __init set_buf_size(char *str)
1473 unsigned long buf_size;
1477 buf_size = memparse(str, &str);
1478 /* nr_entries can not be zero */
1481 trace_buf_size = buf_size;
1484 __setup("trace_buf_size=", set_buf_size);
1486 static int __init set_tracing_thresh(char *str)
1488 unsigned long threshold;
1493 ret = kstrtoul(str, 0, &threshold);
1496 tracing_thresh = threshold * 1000;
1499 __setup("tracing_thresh=", set_tracing_thresh);
1501 unsigned long nsecs_to_usecs(unsigned long nsecs)
1503 return nsecs / 1000;
1507 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1508 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1509 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1510 * of strings in the order that the evals (enum) were defined.
1515 /* These must match the bit positions in trace_iterator_flags */
1516 static const char *trace_options[] = {
1524 int in_ns; /* is this clock in nanoseconds? */
1525 } trace_clocks[] = {
1526 { trace_clock_local, "local", 1 },
1527 { trace_clock_global, "global", 1 },
1528 { trace_clock_counter, "counter", 0 },
1529 { trace_clock_jiffies, "uptime", 0 },
1530 { trace_clock, "perf", 1 },
1531 { ktime_get_mono_fast_ns, "mono", 1 },
1532 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1533 { ktime_get_boot_fast_ns, "boot", 1 },
1537 bool trace_clock_in_ns(struct trace_array *tr)
1539 if (trace_clocks[tr->clock_id].in_ns)
1546 * trace_parser_get_init - gets the buffer for trace parser
1548 int trace_parser_get_init(struct trace_parser *parser, int size)
1550 memset(parser, 0, sizeof(*parser));
1552 parser->buffer = kmalloc(size, GFP_KERNEL);
1553 if (!parser->buffer)
1556 parser->size = size;
1561 * trace_parser_put - frees the buffer for trace parser
1563 void trace_parser_put(struct trace_parser *parser)
1565 kfree(parser->buffer);
1566 parser->buffer = NULL;
1570 * trace_get_user - reads the user input string separated by space
1571 * (matched by isspace(ch))
1573 * For each string found the 'struct trace_parser' is updated,
1574 * and the function returns.
1576 * Returns number of bytes read.
1578 * See kernel/trace/trace.h for 'struct trace_parser' details.
1580 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1581 size_t cnt, loff_t *ppos)
1588 trace_parser_clear(parser);
1590 ret = get_user(ch, ubuf++);
1598 * The parser is not finished with the last write,
1599 * continue reading the user input without skipping spaces.
1601 if (!parser->cont) {
1602 /* skip white space */
1603 while (cnt && isspace(ch)) {
1604 ret = get_user(ch, ubuf++);
1613 /* only spaces were written */
1614 if (isspace(ch) || !ch) {
1621 /* read the non-space input */
1622 while (cnt && !isspace(ch) && ch) {
1623 if (parser->idx < parser->size - 1)
1624 parser->buffer[parser->idx++] = ch;
1629 ret = get_user(ch, ubuf++);
1636 /* We either got finished input or we have to wait for another call. */
1637 if (isspace(ch) || !ch) {
1638 parser->buffer[parser->idx] = 0;
1639 parser->cont = false;
1640 } else if (parser->idx < parser->size - 1) {
1641 parser->cont = true;
1642 parser->buffer[parser->idx++] = ch;
1643 /* Make sure the parsed string always terminates with '\0'. */
1644 parser->buffer[parser->idx] = 0;
1657 /* TODO add a seq_buf_to_buffer() */
1658 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1662 if (trace_seq_used(s) <= s->seq.readpos)
1665 len = trace_seq_used(s) - s->seq.readpos;
1668 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1670 s->seq.readpos += cnt;
1674 unsigned long __read_mostly tracing_thresh;
1675 static const struct file_operations tracing_max_lat_fops;
1677 #if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1678 defined(CONFIG_FSNOTIFY)
1680 static struct workqueue_struct *fsnotify_wq;
1682 static void latency_fsnotify_workfn(struct work_struct *work)
1684 struct trace_array *tr = container_of(work, struct trace_array,
1686 fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY);
1689 static void latency_fsnotify_workfn_irq(struct irq_work *iwork)
1691 struct trace_array *tr = container_of(iwork, struct trace_array,
1693 queue_work(fsnotify_wq, &tr->fsnotify_work);
1696 static void trace_create_maxlat_file(struct trace_array *tr,
1697 struct dentry *d_tracer)
1699 INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn);
1700 init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq);
1701 tr->d_max_latency = trace_create_file("tracing_max_latency", 0644,
1702 d_tracer, &tr->max_latency,
1703 &tracing_max_lat_fops);
1706 __init static int latency_fsnotify_init(void)
1708 fsnotify_wq = alloc_workqueue("tr_max_lat_wq",
1709 WQ_UNBOUND | WQ_HIGHPRI, 0);
1711 pr_err("Unable to allocate tr_max_lat_wq\n");
1717 late_initcall_sync(latency_fsnotify_init);
1719 void latency_fsnotify(struct trace_array *tr)
1724 * We cannot call queue_work(&tr->fsnotify_work) from here because it's
1725 * possible that we are called from __schedule() or do_idle(), which
1726 * could cause a deadlock.
1728 irq_work_queue(&tr->fsnotify_irqwork);
1732 * (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
1733 * defined(CONFIG_FSNOTIFY)
1737 #define trace_create_maxlat_file(tr, d_tracer) \
1738 trace_create_file("tracing_max_latency", 0644, d_tracer, \
1739 &tr->max_latency, &tracing_max_lat_fops)
1743 #ifdef CONFIG_TRACER_MAX_TRACE
1745 * Copy the new maximum trace into the separate maximum-trace
1746 * structure. (this way the maximum trace is permanently saved,
1747 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1750 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1752 struct array_buffer *trace_buf = &tr->array_buffer;
1753 struct array_buffer *max_buf = &tr->max_buffer;
1754 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1755 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1758 max_buf->time_start = data->preempt_timestamp;
1760 max_data->saved_latency = tr->max_latency;
1761 max_data->critical_start = data->critical_start;
1762 max_data->critical_end = data->critical_end;
1764 strncpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1765 max_data->pid = tsk->pid;
1767 * If tsk == current, then use current_uid(), as that does not use
1768 * RCU. The irq tracer can be called out of RCU scope.
1771 max_data->uid = current_uid();
1773 max_data->uid = task_uid(tsk);
1775 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1776 max_data->policy = tsk->policy;
1777 max_data->rt_priority = tsk->rt_priority;
1779 /* record this task's comm */
1780 tracing_record_cmdline(tsk);
1781 latency_fsnotify(tr);
1785 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1787 * @tsk: the task with the latency
1788 * @cpu: The cpu that initiated the trace.
1789 * @cond_data: User data associated with a conditional snapshot
1791 * Flip the buffers between the @tr and the max_tr and record information
1792 * about which task was the cause of this latency.
1795 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu,
1801 WARN_ON_ONCE(!irqs_disabled());
1803 if (!tr->allocated_snapshot) {
1804 /* Only the nop tracer should hit this when disabling */
1805 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1809 arch_spin_lock(&tr->max_lock);
1811 /* Inherit the recordable setting from array_buffer */
1812 if (ring_buffer_record_is_set_on(tr->array_buffer.buffer))
1813 ring_buffer_record_on(tr->max_buffer.buffer);
1815 ring_buffer_record_off(tr->max_buffer.buffer);
1817 #ifdef CONFIG_TRACER_SNAPSHOT
1818 if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data))
1821 swap(tr->array_buffer.buffer, tr->max_buffer.buffer);
1823 __update_max_tr(tr, tsk, cpu);
1826 arch_spin_unlock(&tr->max_lock);
1830 * update_max_tr_single - only copy one trace over, and reset the rest
1832 * @tsk: task with the latency
1833 * @cpu: the cpu of the buffer to copy.
1835 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1838 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1845 WARN_ON_ONCE(!irqs_disabled());
1846 if (!tr->allocated_snapshot) {
1847 /* Only the nop tracer should hit this when disabling */
1848 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1852 arch_spin_lock(&tr->max_lock);
1854 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu);
1856 if (ret == -EBUSY) {
1858 * We failed to swap the buffer due to a commit taking
1859 * place on this CPU. We fail to record, but we reset
1860 * the max trace buffer (no one writes directly to it)
1861 * and flag that it failed.
1863 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1864 "Failed to swap buffers due to commit in progress\n");
1867 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1869 __update_max_tr(tr, tsk, cpu);
1870 arch_spin_unlock(&tr->max_lock);
1872 #endif /* CONFIG_TRACER_MAX_TRACE */
1874 static int wait_on_pipe(struct trace_iterator *iter, int full)
1876 /* Iterators are static, they should be filled or empty */
1877 if (trace_buffer_iter(iter, iter->cpu_file))
1880 return ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file,
1884 #ifdef CONFIG_FTRACE_STARTUP_TEST
1885 static bool selftests_can_run;
1887 struct trace_selftests {
1888 struct list_head list;
1889 struct tracer *type;
1892 static LIST_HEAD(postponed_selftests);
1894 static int save_selftest(struct tracer *type)
1896 struct trace_selftests *selftest;
1898 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1902 selftest->type = type;
1903 list_add(&selftest->list, &postponed_selftests);
1907 static int run_tracer_selftest(struct tracer *type)
1909 struct trace_array *tr = &global_trace;
1910 struct tracer *saved_tracer = tr->current_trace;
1913 if (!type->selftest || tracing_selftest_disabled)
1917 * If a tracer registers early in boot up (before scheduling is
1918 * initialized and such), then do not run its selftests yet.
1919 * Instead, run it a little later in the boot process.
1921 if (!selftests_can_run)
1922 return save_selftest(type);
1925 * Run a selftest on this tracer.
1926 * Here we reset the trace buffer, and set the current
1927 * tracer to be this tracer. The tracer can then run some
1928 * internal tracing to verify that everything is in order.
1929 * If we fail, we do not register this tracer.
1931 tracing_reset_online_cpus(&tr->array_buffer);
1933 tr->current_trace = type;
1935 #ifdef CONFIG_TRACER_MAX_TRACE
1936 if (type->use_max_tr) {
1937 /* If we expanded the buffers, make sure the max is expanded too */
1938 if (ring_buffer_expanded)
1939 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1940 RING_BUFFER_ALL_CPUS);
1941 tr->allocated_snapshot = true;
1945 /* the test is responsible for initializing and enabling */
1946 pr_info("Testing tracer %s: ", type->name);
1947 ret = type->selftest(type, tr);
1948 /* the test is responsible for resetting too */
1949 tr->current_trace = saved_tracer;
1951 printk(KERN_CONT "FAILED!\n");
1952 /* Add the warning after printing 'FAILED' */
1956 /* Only reset on passing, to avoid touching corrupted buffers */
1957 tracing_reset_online_cpus(&tr->array_buffer);
1959 #ifdef CONFIG_TRACER_MAX_TRACE
1960 if (type->use_max_tr) {
1961 tr->allocated_snapshot = false;
1963 /* Shrink the max buffer again */
1964 if (ring_buffer_expanded)
1965 ring_buffer_resize(tr->max_buffer.buffer, 1,
1966 RING_BUFFER_ALL_CPUS);
1970 printk(KERN_CONT "PASSED\n");
1974 static __init int init_trace_selftests(void)
1976 struct trace_selftests *p, *n;
1977 struct tracer *t, **last;
1980 selftests_can_run = true;
1982 mutex_lock(&trace_types_lock);
1984 if (list_empty(&postponed_selftests))
1987 pr_info("Running postponed tracer tests:\n");
1989 tracing_selftest_running = true;
1990 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1991 /* This loop can take minutes when sanitizers are enabled, so
1992 * let's make sure we allow RCU processing.
1995 ret = run_tracer_selftest(p->type);
1996 /* If the test fails, then warn and remove from available_tracers */
1998 WARN(1, "tracer: %s failed selftest, disabling\n",
2000 last = &trace_types;
2001 for (t = trace_types; t; t = t->next) {
2012 tracing_selftest_running = false;
2015 mutex_unlock(&trace_types_lock);
2019 core_initcall(init_trace_selftests);
2021 static inline int run_tracer_selftest(struct tracer *type)
2025 #endif /* CONFIG_FTRACE_STARTUP_TEST */
2027 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
2029 static void __init apply_trace_boot_options(void);
2032 * register_tracer - register a tracer with the ftrace system.
2033 * @type: the plugin for the tracer
2035 * Register a new plugin tracer.
2037 int __init register_tracer(struct tracer *type)
2043 pr_info("Tracer must have a name\n");
2047 if (strlen(type->name) >= MAX_TRACER_SIZE) {
2048 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
2052 if (security_locked_down(LOCKDOWN_TRACEFS)) {
2053 pr_warn("Can not register tracer %s due to lockdown\n",
2058 mutex_lock(&trace_types_lock);
2060 tracing_selftest_running = true;
2062 for (t = trace_types; t; t = t->next) {
2063 if (strcmp(type->name, t->name) == 0) {
2065 pr_info("Tracer %s already registered\n",
2072 if (!type->set_flag)
2073 type->set_flag = &dummy_set_flag;
2075 /* allocate a dummy tracer_flags */
2076 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
2081 type->flags->val = 0;
2082 type->flags->opts = dummy_tracer_opt;
2084 if (!type->flags->opts)
2085 type->flags->opts = dummy_tracer_opt;
2087 /* store the tracer for __set_tracer_option */
2088 type->flags->trace = type;
2090 ret = run_tracer_selftest(type);
2094 type->next = trace_types;
2096 add_tracer_options(&global_trace, type);
2099 tracing_selftest_running = false;
2100 mutex_unlock(&trace_types_lock);
2102 if (ret || !default_bootup_tracer)
2105 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
2108 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
2109 /* Do we want this tracer to start on bootup? */
2110 tracing_set_tracer(&global_trace, type->name);
2111 default_bootup_tracer = NULL;
2113 apply_trace_boot_options();
2115 /* disable other selftests, since this will break it. */
2116 tracing_selftest_disabled = true;
2117 #ifdef CONFIG_FTRACE_STARTUP_TEST
2118 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
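/*
 * Illustrative sketch (not part of this file) of what a plugin tracer
 * passes to register_tracer(); in-tree tracers such as the "nop" tracer
 * follow this pattern. Names are hypothetical.
 */
#if 0	/* example only, not built */
static int my_tracer_init(struct trace_array *tr)
{
	/* Set up tracer state and start writing into tr->array_buffer. */
	return 0;
}

static void my_tracer_reset(struct trace_array *tr)
{
	/* Undo whatever my_tracer_init() did. */
}

static struct tracer my_tracer __read_mostly = {
	.name	= "my_tracer",
	.init	= my_tracer_init,
	.reset	= my_tracer_reset,
};

static __init int my_tracer_register(void)
{
	return register_tracer(&my_tracer);
}
core_initcall(my_tracer_register);
#endif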
2126 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
2128 struct trace_buffer *buffer = buf->buffer;
2133 ring_buffer_record_disable(buffer);
2135 /* Make sure all commits have finished */
2137 ring_buffer_reset_cpu(buffer, cpu);
2139 ring_buffer_record_enable(buffer);
2142 void tracing_reset_online_cpus(struct array_buffer *buf)
2144 struct trace_buffer *buffer = buf->buffer;
2149 ring_buffer_record_disable(buffer);
2151 /* Make sure all commits have finished */
2154 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
2156 ring_buffer_reset_online_cpus(buffer);
2158 ring_buffer_record_enable(buffer);
2161 /* Must have trace_types_lock held */
2162 void tracing_reset_all_online_cpus(void)
2164 struct trace_array *tr;
2166 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
2167 if (!tr->clear_trace)
2169 tr->clear_trace = false;
2170 tracing_reset_online_cpus(&tr->array_buffer);
2171 #ifdef CONFIG_TRACER_MAX_TRACE
2172 tracing_reset_online_cpus(&tr->max_buffer);
2177 static int *tgid_map;
2179 #define SAVED_CMDLINES_DEFAULT 128
2180 #define NO_CMDLINE_MAP UINT_MAX
2181 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2182 struct saved_cmdlines_buffer {
2183 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
2184 unsigned *map_cmdline_to_pid;
2185 unsigned cmdline_num;
2187 char *saved_cmdlines;
2189 static struct saved_cmdlines_buffer *savedcmd;
2191 /* temporarily disable recording */
2192 static atomic_t trace_record_taskinfo_disabled __read_mostly;
2194 static inline char *get_saved_cmdlines(int idx)
2196 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
2199 static inline void set_cmdline(int idx, const char *cmdline)
2201 strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
2204 static int allocate_cmdlines_buffer(unsigned int val,
2205 struct saved_cmdlines_buffer *s)
2207 s->map_cmdline_to_pid = kmalloc_array(val,
2208 sizeof(*s->map_cmdline_to_pid),
2210 if (!s->map_cmdline_to_pid)
2213 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
2214 if (!s->saved_cmdlines) {
2215 kfree(s->map_cmdline_to_pid);
2220 s->cmdline_num = val;
2221 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
2222 sizeof(s->map_pid_to_cmdline));
2223 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
2224 val * sizeof(*s->map_cmdline_to_pid));
2229 static int trace_create_savedcmd(void)
2233 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
2237 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
2247 int is_tracing_stopped(void)
2249 return global_trace.stop_count;
2253 * tracing_start - quick start of the tracer
2255 * If tracing is enabled but was stopped by tracing_stop,
2256 * this will start the tracer back up.
2258 void tracing_start(void)
2260 struct trace_buffer *buffer;
2261 unsigned long flags;
2263 if (tracing_disabled)
2266 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2267 if (--global_trace.stop_count) {
2268 if (global_trace.stop_count < 0) {
2269 /* Someone screwed up their debugging */
2271 global_trace.stop_count = 0;
2276 /* Prevent the buffers from switching */
2277 arch_spin_lock(&global_trace.max_lock);
2279 buffer = global_trace.array_buffer.buffer;
2281 ring_buffer_record_enable(buffer);
2283 #ifdef CONFIG_TRACER_MAX_TRACE
2284 buffer = global_trace.max_buffer.buffer;
2286 ring_buffer_record_enable(buffer);
2289 arch_spin_unlock(&global_trace.max_lock);
2292 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2295 static void tracing_start_tr(struct trace_array *tr)
2297 struct trace_buffer *buffer;
2298 unsigned long flags;
2300 if (tracing_disabled)
2303 /* If global, we need to also start the max tracer */
2304 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2305 return tracing_start();
2307 raw_spin_lock_irqsave(&tr->start_lock, flags);
2309 if (--tr->stop_count) {
2310 if (tr->stop_count < 0) {
2311 /* Someone screwed up their debugging */
2318 buffer = tr->array_buffer.buffer;
2320 ring_buffer_record_enable(buffer);
2323 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2327 * tracing_stop - quick stop of the tracer
2329 * Light weight way to stop tracing. Use in conjunction with
2332 void tracing_stop(void)
2334 struct trace_buffer *buffer;
2335 unsigned long flags;
2337 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
2338 if (global_trace.stop_count++)
2341 /* Prevent the buffers from switching */
2342 arch_spin_lock(&global_trace.max_lock);
2344 buffer = global_trace.array_buffer.buffer;
2346 ring_buffer_record_disable(buffer);
2348 #ifdef CONFIG_TRACER_MAX_TRACE
2349 buffer = global_trace.max_buffer.buffer;
2351 ring_buffer_record_disable(buffer);
2354 arch_spin_unlock(&global_trace.max_lock);
2357 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
2360 static void tracing_stop_tr(struct trace_array *tr)
2362 struct trace_buffer *buffer;
2363 unsigned long flags;
2365 /* If global, we need to also stop the max tracer */
2366 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
2367 return tracing_stop();
2369 raw_spin_lock_irqsave(&tr->start_lock, flags);
2370 if (tr->stop_count++)
2373 buffer = tr->array_buffer.buffer;
2375 ring_buffer_record_disable(buffer);
2378 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
2381 static int trace_save_cmdline(struct task_struct *tsk)
2385 /* treat recording of idle task as a success */
2389 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
2393 * It's not the end of the world if we don't get
2394 * the lock, but we also don't want to spin
2395 * nor do we want to disable interrupts,
2396 * so if we miss here, then better luck next time.
2398 if (!arch_spin_trylock(&trace_cmdline_lock))
2401 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2402 if (idx == NO_CMDLINE_MAP) {
2403 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
2406 * Check whether the cmdline buffer at idx has a pid
2407 * mapped. We are going to overwrite that entry so we
2408 * need to clear the map_pid_to_cmdline. Otherwise we
2409 * would read the new comm for the old pid.
2411 pid = savedcmd->map_cmdline_to_pid[idx];
2412 if (pid != NO_CMDLINE_MAP)
2413 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
2415 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
2416 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
2418 savedcmd->cmdline_idx = idx;
2421 set_cmdline(idx, tsk->comm);
2423 arch_spin_unlock(&trace_cmdline_lock);
2428 static void __trace_find_cmdline(int pid, char comm[])
2433 strcpy(comm, "<idle>");
2437 if (WARN_ON_ONCE(pid < 0)) {
2438 strcpy(comm, "<XXX>");
2442 if (pid > PID_MAX_DEFAULT) {
2443 strcpy(comm, "<...>");
2447 map = savedcmd->map_pid_to_cmdline[pid];
2448 if (map != NO_CMDLINE_MAP)
2449 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2451 strcpy(comm, "<...>");
2454 void trace_find_cmdline(int pid, char comm[])
2457 arch_spin_lock(&trace_cmdline_lock);
2459 __trace_find_cmdline(pid, comm);
2461 arch_spin_unlock(&trace_cmdline_lock);
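/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a caller in a tracer's output path might resolve a recorded pid back
 * to a task comm with trace_find_cmdline(). The surrounding function and
 * its name are assumptions for illustration only.
 */
static void example_print_comm(struct trace_seq *s, int pid)
{
	char comm[TASK_COMM_LEN];

	/* Fills comm with the saved name, or "<...>" if it was never saved */
	trace_find_cmdline(pid, comm);
	trace_seq_printf(s, "%s-%d", comm, pid);
}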
2465 int trace_find_tgid(int pid)
2467 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2470 return tgid_map[pid];
2473 static int trace_save_tgid(struct task_struct *tsk)
2475 /* treat recording of idle task as a success */
2479 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2482 tgid_map[tsk->pid] = tsk->tgid;
2486 static bool tracing_record_taskinfo_skip(int flags)
2488 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2490 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2492 if (!__this_cpu_read(trace_taskinfo_save))
2498 * tracing_record_taskinfo - record the task info of a task
2500 * @task: task to record
2501 * @flags: TRACE_RECORD_CMDLINE for recording comm
2502 * TRACE_RECORD_TGID for recording tgid
2504 void tracing_record_taskinfo(struct task_struct *task, int flags)
2508 if (tracing_record_taskinfo_skip(flags))
2512 * Record as much task information as possible. If some fail, continue
2513 * to try to record the others.
2515 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2516 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2518 /* If recording any information failed, retry again soon. */
2522 __this_cpu_write(trace_taskinfo_save, false);
2526 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2528 * @prev: previous task during sched_switch
2529 * @next: next task during sched_switch
2530 * @flags: TRACE_RECORD_CMDLINE for recording comm
2531 * TRACE_RECORD_TGID for recording tgid
2533 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2534 struct task_struct *next, int flags)
2538 if (tracing_record_taskinfo_skip(flags))
2542 * Record as much task information as possible. If some fail, continue
2543 * to try to record the others.
2545 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2546 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2547 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2548 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2550 /* If recording any information failed, retry again soon. */
2554 __this_cpu_write(trace_taskinfo_save, false);
2557 /* Helpers to record a specific task information */
2558 void tracing_record_cmdline(struct task_struct *task)
2560 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2563 void tracing_record_tgid(struct task_struct *task)
2565 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
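/*
 * Illustrative sketch (editor's addition): an event-enable path would
 * typically record the current task's info so that later lookups via
 * trace_find_cmdline()/trace_find_tgid() can resolve. The placement and
 * function name here are assumptions for illustration only.
 */
static void example_record_current_task(void)
{
	/* Save both the comm and the tgid of the task emitting events */
	tracing_record_cmdline(current);
	tracing_record_tgid(current);
}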
2569 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2570 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2571 * simplifies those functions and keeps them in sync.
2573 enum print_line_t trace_handle_return(struct trace_seq *s)
2575 return trace_seq_has_overflowed(s) ?
2576 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2578 EXPORT_SYMBOL_GPL(trace_handle_return);
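/*
 * Illustrative sketch (editor's addition): a typical trace_event output
 * callback writes into iter->seq and then lets trace_handle_return() pick
 * the right return value. The event layout (struct my_entry) and function
 * name are hypothetical.
 */
struct my_entry {
	struct trace_entry	ent;
	unsigned long		value;
};

static enum print_line_t example_trace_output(struct trace_iterator *iter,
					      int flags,
					      struct trace_event *event)
{
	struct my_entry *field = (struct my_entry *)iter->ent;

	trace_seq_printf(&iter->seq, "value=%lu\n", field->value);

	/* PARTIAL_LINE if iter->seq overflowed, HANDLED otherwise */
	return trace_handle_return(&iter->seq);
}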
2581 tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
2582 unsigned long flags, int pc)
2584 struct task_struct *tsk = current;
2586 entry->preempt_count = pc & 0xff;
2587 entry->pid = (tsk) ? tsk->pid : 0;
2590 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2591 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2593 TRACE_FLAG_IRQS_NOSUPPORT |
2595 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2596 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2597 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2598 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2599 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2601 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
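/*
 * Illustrative sketch (editor's addition): callers capture the irq flags
 * and preempt count at the tracepoint site and pass them down so the
 * entry reflects the context at record time. This mirrors the pattern used
 * by trace_vbprintk() further below; the helper name is hypothetical.
 */
static void example_fill_entry(struct trace_entry *ent, unsigned short type)
{
	unsigned long flags;
	int pc;

	pc = preempt_count();
	local_save_flags(flags);

	/* Stamp the entry with the context captured at the call site */
	tracing_generic_entry_update(ent, type, flags, pc);
}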
2603 struct ring_buffer_event *
2604 trace_buffer_lock_reserve(struct trace_buffer *buffer,
2607 unsigned long flags, int pc)
2609 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2612 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2613 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2614 static int trace_buffered_event_ref;
2617 * trace_buffered_event_enable - enable buffering events
2619 * When events are being filtered, it is quicker to use a temporary
2620 * buffer to write the event data into if there's a likely chance
2621 * that it will not be committed. The discard of the ring buffer
2622 * is not as fast as committing, and is much slower than copying
2625 * When an event is to be filtered, per CPU buffers are allocated
2626 * to write the event data into. If the event is then filtered and
2627 * discarded, it is simply dropped; otherwise the entire data is committed
2630 void trace_buffered_event_enable(void)
2632 struct ring_buffer_event *event;
2636 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2638 if (trace_buffered_event_ref++)
2641 for_each_tracing_cpu(cpu) {
2642 page = alloc_pages_node(cpu_to_node(cpu),
2643 GFP_KERNEL | __GFP_NORETRY, 0);
2647 event = page_address(page);
2648 memset(event, 0, sizeof(*event));
2650 per_cpu(trace_buffered_event, cpu) = event;
2653 if (cpu == smp_processor_id() &&
2654 __this_cpu_read(trace_buffered_event) !=
2655 per_cpu(trace_buffered_event, cpu))
2662 trace_buffered_event_disable();
2665 static void enable_trace_buffered_event(void *data)
2667 /* Probably not needed, but do it anyway */
2669 this_cpu_dec(trace_buffered_event_cnt);
2672 static void disable_trace_buffered_event(void *data)
2674 this_cpu_inc(trace_buffered_event_cnt);
2678 * trace_buffered_event_disable - disable buffering events
2680 * When a filter is removed, it is faster to not use the buffered
2681 * events, and to commit directly into the ring buffer. Free up
2682 * the temp buffers when there are no more users. This requires
2683 * special synchronization with current events.
2685 void trace_buffered_event_disable(void)
2689 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2691 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2694 if (--trace_buffered_event_ref)
2698 /* For each CPU, set the buffer as used. */
2699 smp_call_function_many(tracing_buffer_mask,
2700 disable_trace_buffered_event, NULL, 1);
2703 /* Wait for all current users to finish */
2706 for_each_tracing_cpu(cpu) {
2707 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2708 per_cpu(trace_buffered_event, cpu) = NULL;
2711 * Make sure trace_buffered_event is NULL before clearing
2712 * trace_buffered_event_cnt.
2717 /* Do the work on each cpu */
2718 smp_call_function_many(tracing_buffer_mask,
2719 enable_trace_buffered_event, NULL, 1);
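/*
 * Illustrative sketch (editor's addition): enable/disable are reference
 * counted and must be paired under event_mutex, as the WARN_ON_ONCE()
 * checks above require. A filter setup/teardown caller is assumed here.
 */
static void example_filter_setup(bool on)
{
	mutex_lock(&event_mutex);
	if (on)
		trace_buffered_event_enable();
	else
		trace_buffered_event_disable();
	mutex_unlock(&event_mutex);
}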
2723 static struct trace_buffer *temp_buffer;
2725 struct ring_buffer_event *
2726 trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
2727 struct trace_event_file *trace_file,
2728 int type, unsigned long len,
2729 unsigned long flags, int pc)
2731 struct ring_buffer_event *entry;
2734 *current_rb = trace_file->tr->array_buffer.buffer;
2736 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2737 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2738 (entry = this_cpu_read(trace_buffered_event))) {
2739 /* Try to use the per cpu buffer first */
2740 val = this_cpu_inc_return(trace_buffered_event_cnt);
2742 trace_event_setup(entry, type, flags, pc);
2743 entry->array[0] = len;
2746 this_cpu_dec(trace_buffered_event_cnt);
2749 entry = __trace_buffer_lock_reserve(*current_rb,
2750 type, len, flags, pc);
2752 * If tracing is off, but we have triggers enabled
2753 * we still need to look at the event data. Use the temp_buffer
2754 * to store the trace event for the trigger to use. It is
2755 * recursion safe and will not be recorded anywhere.
2757 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2758 *current_rb = temp_buffer;
2759 entry = __trace_buffer_lock_reserve(*current_rb,
2760 type, len, flags, pc);
2764 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2766 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2767 static DEFINE_MUTEX(tracepoint_printk_mutex);
2769 static void output_printk(struct trace_event_buffer *fbuffer)
2771 struct trace_event_call *event_call;
2772 struct trace_event_file *file;
2773 struct trace_event *event;
2774 unsigned long flags;
2775 struct trace_iterator *iter = tracepoint_print_iter;
2777 /* We should never get here if iter is NULL */
2778 if (WARN_ON_ONCE(!iter))
2781 event_call = fbuffer->trace_file->event_call;
2782 if (!event_call || !event_call->event.funcs ||
2783 !event_call->event.funcs->trace)
2786 file = fbuffer->trace_file;
2787 if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
2788 (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
2789 !filter_match_preds(file->filter, fbuffer->entry)))
2792 event = &fbuffer->trace_file->event_call->event;
2794 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2795 trace_seq_init(&iter->seq);
2796 iter->ent = fbuffer->entry;
2797 event_call->event.funcs->trace(iter, 0, event);
2798 trace_seq_putc(&iter->seq, 0);
2799 printk("%s", iter->seq.buffer);
2801 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2804 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2805 void *buffer, size_t *lenp,
2808 int save_tracepoint_printk;
2811 mutex_lock(&tracepoint_printk_mutex);
2812 save_tracepoint_printk = tracepoint_printk;
2814 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2817 * This will force exiting early, as tracepoint_printk
2818 * is always zero when tracepoint_print_iter is not allocated
2820 if (!tracepoint_print_iter)
2821 tracepoint_printk = 0;
2823 if (save_tracepoint_printk == tracepoint_printk)
2826 if (tracepoint_printk)
2827 static_key_enable(&tracepoint_printk_key.key);
2829 static_key_disable(&tracepoint_printk_key.key);
2832 mutex_unlock(&tracepoint_printk_mutex);
2837 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2839 if (static_key_false(&tracepoint_printk_key.key))
2840 output_printk(fbuffer);
2842 if (static_branch_unlikely(&trace_event_exports_enabled))
2843 ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
2844 event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
2845 fbuffer->event, fbuffer->entry,
2846 fbuffer->flags, fbuffer->pc, fbuffer->regs);
2848 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2853 * trace_buffer_unlock_commit_regs()
2854 * trace_event_buffer_commit()
2855 * trace_event_raw_event_xxx()
2857 # define STACK_SKIP 3
2859 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2860 struct trace_buffer *buffer,
2861 struct ring_buffer_event *event,
2862 unsigned long flags, int pc,
2863 struct pt_regs *regs)
2865 __buffer_unlock_commit(buffer, event);
2868 * If regs is not set, then skip the necessary functions.
2869 * Note, we can still get here via blktrace, wakeup tracer
2870 * and mmiotrace, but that's ok if they lose a function or
2871 * two. They are not that meaningful.
2873 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2874 ftrace_trace_userstack(tr, buffer, flags, pc);
2878 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2881 trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
2882 struct ring_buffer_event *event)
2884 __buffer_unlock_commit(buffer, event);
2888 trace_function(struct trace_array *tr,
2889 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2892 struct trace_event_call *call = &event_function;
2893 struct trace_buffer *buffer = tr->array_buffer.buffer;
2894 struct ring_buffer_event *event;
2895 struct ftrace_entry *entry;
2897 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2901 entry = ring_buffer_event_data(event);
2903 entry->parent_ip = parent_ip;
2905 if (!call_filter_check_discard(call, entry, buffer, event)) {
2906 if (static_branch_unlikely(&trace_function_exports_enabled))
2907 ftrace_exports(event, TRACE_EXPORT_FUNCTION);
2908 __buffer_unlock_commit(buffer, event);
2912 #ifdef CONFIG_STACKTRACE
2914 /* Allow 4 levels of nesting: normal, softirq, irq, NMI */
2915 #define FTRACE_KSTACK_NESTING 4
2917 #define FTRACE_KSTACK_ENTRIES (PAGE_SIZE / FTRACE_KSTACK_NESTING)
2919 struct ftrace_stack {
2920 unsigned long calls[FTRACE_KSTACK_ENTRIES];
2924 struct ftrace_stacks {
2925 struct ftrace_stack stacks[FTRACE_KSTACK_NESTING];
2928 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
2929 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2931 static void __ftrace_trace_stack(struct trace_buffer *buffer,
2932 unsigned long flags,
2933 int skip, int pc, struct pt_regs *regs)
2935 struct trace_event_call *call = &event_kernel_stack;
2936 struct ring_buffer_event *event;
2937 unsigned int size, nr_entries;
2938 struct ftrace_stack *fstack;
2939 struct stack_entry *entry;
2943 * Add one, for this function and the call to save_stack_trace()
2944 * If regs is set, then these functions will not be in the way.
2946 #ifndef CONFIG_UNWINDER_ORC
2951 preempt_disable_notrace();
2953 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
2955 /* This should never happen. If it does, yell once and skip */
2956 if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
2960 * The above __this_cpu_inc_return() is 'atomic' cpu local. An
2961 * interrupt will either see the value pre increment or post
2962 * increment. If the interrupt happens pre increment it will have
2963 * restored the counter when it returns. We just need a barrier to
2964 * keep gcc from moving things around.
2968 fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
2969 size = ARRAY_SIZE(fstack->calls);
2972 nr_entries = stack_trace_save_regs(regs, fstack->calls,
2975 nr_entries = stack_trace_save(fstack->calls, size, skip);
2978 size = nr_entries * sizeof(unsigned long);
2979 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2980 sizeof(*entry) + size, flags, pc);
2983 entry = ring_buffer_event_data(event);
2985 memcpy(&entry->caller, fstack->calls, size);
2986 entry->size = nr_entries;
2988 if (!call_filter_check_discard(call, entry, buffer, event))
2989 __buffer_unlock_commit(buffer, event);
2992 /* Again, don't let gcc optimize things here */
2994 __this_cpu_dec(ftrace_stack_reserve);
2995 preempt_enable_notrace();
2999 static inline void ftrace_trace_stack(struct trace_array *tr,
3000 struct trace_buffer *buffer,
3001 unsigned long flags,
3002 int skip, int pc, struct pt_regs *regs)
3004 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
3007 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
3010 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
3013 struct trace_buffer *buffer = tr->array_buffer.buffer;
3015 if (rcu_is_watching()) {
3016 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3021 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
3022 * but if the above rcu_is_watching() failed, then the NMI
3023 * triggered someplace critical, and rcu_irq_enter() should
3024 * not be called from NMI.
3026 if (unlikely(in_nmi()))
3029 rcu_irq_enter_irqson();
3030 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
3031 rcu_irq_exit_irqson();
3035 * trace_dump_stack - record a stack back trace in the trace buffer
3036 * @skip: Number of functions to skip (helper handlers)
3038 void trace_dump_stack(int skip)
3040 unsigned long flags;
3042 if (tracing_disabled || tracing_selftest_running)
3045 local_save_flags(flags);
3047 #ifndef CONFIG_UNWINDER_ORC
3048 /* Skip 1 to skip this function. */
3051 __ftrace_trace_stack(global_trace.array_buffer.buffer,
3052 flags, skip, preempt_count(), NULL);
3054 EXPORT_SYMBOL_GPL(trace_dump_stack);
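/*
 * Illustrative sketch (editor's addition): trace_dump_stack() can be
 * dropped into a code path of interest to record the kernel stack into
 * the top-level trace buffer. The surrounding function is hypothetical.
 */
static void example_checkpoint(void)
{
	/* skip=0: include the immediate callers of trace_dump_stack() */
	trace_dump_stack(0);
}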
3056 #ifdef CONFIG_USER_STACKTRACE_SUPPORT
3057 static DEFINE_PER_CPU(int, user_stack_count);
3060 ftrace_trace_userstack(struct trace_array *tr,
3061 struct trace_buffer *buffer, unsigned long flags, int pc)
3063 struct trace_event_call *call = &event_user_stack;
3064 struct ring_buffer_event *event;
3065 struct userstack_entry *entry;
3067 if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE))
3071 * NMIs can not handle page faults, even with fix ups.
3072 * Saving the user stack can (and often does) fault.
3074 if (unlikely(in_nmi()))
3078 * prevent recursion, since the user stack tracing may
3079 * trigger other kernel events.
3082 if (__this_cpu_read(user_stack_count))
3085 __this_cpu_inc(user_stack_count);
3087 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
3088 sizeof(*entry), flags, pc);
3090 goto out_drop_count;
3091 entry = ring_buffer_event_data(event);
3093 entry->tgid = current->tgid;
3094 memset(&entry->caller, 0, sizeof(entry->caller));
3096 stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES);
3097 if (!call_filter_check_discard(call, entry, buffer, event))
3098 __buffer_unlock_commit(buffer, event);
3101 __this_cpu_dec(user_stack_count);
3105 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
3106 static void ftrace_trace_userstack(struct trace_array *tr,
3107 struct trace_buffer *buffer,
3108 unsigned long flags, int pc)
3111 #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
3113 #endif /* CONFIG_STACKTRACE */
3115 /* created for use with alloc_percpu */
3116 struct trace_buffer_struct {
3118 char buffer[4][TRACE_BUF_SIZE];
3121 static struct trace_buffer_struct *trace_percpu_buffer;
3124 * This allows for lockless recording. If we're nested too deeply, then
3125 * this returns NULL.
3127 static char *get_trace_buf(void)
3129 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
3131 if (!buffer || buffer->nesting >= 4)
3136 /* Interrupts must see nesting incremented before we use the buffer */
3138 return &buffer->buffer[buffer->nesting - 1][0];
3141 static void put_trace_buf(void)
3143 /* Don't let the decrement of nesting leak before this */
3145 this_cpu_dec(trace_percpu_buffer->nesting);
3148 static int alloc_percpu_trace_buffer(void)
3150 struct trace_buffer_struct *buffers;
3152 if (trace_percpu_buffer)
3155 buffers = alloc_percpu(struct trace_buffer_struct);
3156 if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer"))
3159 trace_percpu_buffer = buffers;
3163 static int buffers_allocated;
3165 void trace_printk_init_buffers(void)
3167 if (buffers_allocated)
3170 if (alloc_percpu_trace_buffer())
3173 /* trace_printk() is for debug use only. Don't use it in production. */
3176 pr_warn("**********************************************************\n");
3177 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3179 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
3181 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
3182 pr_warn("** unsafe for production use. **\n");
3184 pr_warn("** If you see this message and you are not debugging **\n");
3185 pr_warn("** the kernel, report this immediately to your vendor! **\n");
3187 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
3188 pr_warn("**********************************************************\n");
3190 /* Expand the buffers to set size */
3191 tracing_update_buffers();
3193 buffers_allocated = 1;
3196 * trace_printk_init_buffers() can be called by modules.
3197 * If that happens, then we need to start cmdline recording
3198 * directly here. If the global_trace.buffer is already
3199 * allocated here, then this was called by module code.
3201 if (global_trace.array_buffer.buffer)
3202 tracing_start_cmdline_record();
3204 EXPORT_SYMBOL_GPL(trace_printk_init_buffers);
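/*
 * Illustrative sketch (editor's addition): trace_printk() is the debug-only
 * front end that ends up in trace_vbprintk()/trace_vprintk() below. The
 * call site shown here is hypothetical.
 */
static void example_debug_print(int cpu, u64 latency)
{
	/* Lands in the ring buffer and is readable via the "trace" file */
	trace_printk("cpu=%d latency=%llu ns\n", cpu, latency);
}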
3206 void trace_printk_start_comm(void)
3208 /* Start tracing comms if trace printk is set */
3209 if (!buffers_allocated)
3211 tracing_start_cmdline_record();
3214 static void trace_printk_start_stop_comm(int enabled)
3216 if (!buffers_allocated)
3220 tracing_start_cmdline_record();
3222 tracing_stop_cmdline_record();
3226 * trace_vbprintk - write binary msg to tracing buffer
3227 * @ip: The address of the caller
3228 * @fmt: The string format to write to the buffer
3229 * @args: Arguments for @fmt
3231 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
3233 struct trace_event_call *call = &event_bprint;
3234 struct ring_buffer_event *event;
3235 struct trace_buffer *buffer;
3236 struct trace_array *tr = &global_trace;
3237 struct bprint_entry *entry;
3238 unsigned long flags;
3240 int len = 0, size, pc;
3242 if (unlikely(tracing_selftest_running || tracing_disabled))
3245 /* Don't pollute graph traces with trace_vprintk internals */
3246 pause_graph_tracing();
3248 pc = preempt_count();
3249 preempt_disable_notrace();
3251 tbuffer = get_trace_buf();
3257 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
3259 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
3262 local_save_flags(flags);
3263 size = sizeof(*entry) + sizeof(u32) * len;
3264 buffer = tr->array_buffer.buffer;
3265 ring_buffer_nest_start(buffer);
3266 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
3270 entry = ring_buffer_event_data(event);
3274 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
3275 if (!call_filter_check_discard(call, entry, buffer, event)) {
3276 __buffer_unlock_commit(buffer, event);
3277 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
3281 ring_buffer_nest_end(buffer);
3286 preempt_enable_notrace();
3287 unpause_graph_tracing();
3291 EXPORT_SYMBOL_GPL(trace_vbprintk);
3295 __trace_array_vprintk(struct trace_buffer *buffer,
3296 unsigned long ip, const char *fmt, va_list args)
3298 struct trace_event_call *call = &event_print;
3299 struct ring_buffer_event *event;
3300 int len = 0, size, pc;
3301 struct print_entry *entry;
3302 unsigned long flags;
3305 if (tracing_disabled || tracing_selftest_running)
3308 /* Don't pollute graph traces with trace_vprintk internals */
3309 pause_graph_tracing();
3311 pc = preempt_count();
3312 preempt_disable_notrace();
3315 tbuffer = get_trace_buf();
3321 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
3323 local_save_flags(flags);
3324 size = sizeof(*entry) + len + 1;
3325 ring_buffer_nest_start(buffer);
3326 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3330 entry = ring_buffer_event_data(event);
3333 memcpy(&entry->buf, tbuffer, len + 1);
3334 if (!call_filter_check_discard(call, entry, buffer, event)) {
3335 __buffer_unlock_commit(buffer, event);
3336 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3340 ring_buffer_nest_end(buffer);
3344 preempt_enable_notrace();
3345 unpause_graph_tracing();
3351 int trace_array_vprintk(struct trace_array *tr,
3352 unsigned long ip, const char *fmt, va_list args)
3354 return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
3358 * trace_array_printk - Print a message to a specific instance
3359 * @tr: The instance trace_array descriptor
3360 * @ip: The instruction pointer that this is called from.
3361 * @fmt: The format to print (printf format)
3363 * If a subsystem sets up its own instance, it has the right to
3364 * printk strings into its tracing instance buffer using this
3365 * function. Note, this function will not write into the top level
3366 * buffer (use trace_printk() for that), as the top level buffer
3367 * should only contain events that can be individually disabled.
3368 * trace_printk() is only used for debugging a kernel, and should not
3369 * ever be incorporated into normal use.
3371 * trace_array_printk() can be used, as it will not add noise to the
3372 * top level tracing buffer.
3374 * Note, trace_array_init_printk() must be called on @tr before this
3378 int trace_array_printk(struct trace_array *tr,
3379 unsigned long ip, const char *fmt, ...)
3387 /* This is only allowed for created instances */
3388 if (tr == &global_trace)
3391 if (!(tr->trace_flags & TRACE_ITER_PRINTK))
3395 ret = trace_array_vprintk(tr, ip, fmt, ap);
3399 EXPORT_SYMBOL_GPL(trace_array_printk);
3402 * trace_array_init_printk - Initialize buffers for trace_array_printk()
3403 * @tr: The trace array to initialize the buffers for
3405 * As trace_array_printk() only writes into instances, such calls are OK
3406 * to have in the kernel (unlike trace_printk()). This needs to be called
3407 * before trace_array_printk() can be used on a trace_array.
3409 int trace_array_init_printk(struct trace_array *tr)
3414 /* This is only allowed for created instances */
3415 if (tr == &global_trace)
3418 return alloc_percpu_trace_buffer();
3420 EXPORT_SYMBOL_GPL(trace_array_init_printk);
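/*
 * Illustrative sketch (editor's addition): a subsystem writing into its own
 * instance, per the kerneldoc above. The instance name "mysubsys" is made
 * up, and trace_array_get_by_name()/trace_array_put() are assumed to be
 * available to the caller.
 */
static int example_instance_printk(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("mysubsys");
	if (!tr)
		return -ENOMEM;

	/* Must be called once before trace_array_printk() is used */
	trace_array_init_printk(tr);
	trace_array_printk(tr, _THIS_IP_, "subsystem state: %d\n", 42);

	trace_array_put(tr);
	return 0;
}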
3423 int trace_array_printk_buf(struct trace_buffer *buffer,
3424 unsigned long ip, const char *fmt, ...)
3429 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3433 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3439 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3441 return trace_array_vprintk(&global_trace, ip, fmt, args);
3443 EXPORT_SYMBOL_GPL(trace_vprintk);
3445 static void trace_iterator_increment(struct trace_iterator *iter)
3447 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3451 ring_buffer_iter_advance(buf_iter);
3454 static struct trace_entry *
3455 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3456 unsigned long *lost_events)
3458 struct ring_buffer_event *event;
3459 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3462 event = ring_buffer_iter_peek(buf_iter, ts);
3464 *lost_events = ring_buffer_iter_dropped(buf_iter) ?
3465 (unsigned long)-1 : 0;
3467 event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts,
3472 iter->ent_size = ring_buffer_event_length(event);
3473 return ring_buffer_event_data(event);
3479 static struct trace_entry *
3480 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3481 unsigned long *missing_events, u64 *ent_ts)
3483 struct trace_buffer *buffer = iter->array_buffer->buffer;
3484 struct trace_entry *ent, *next = NULL;
3485 unsigned long lost_events = 0, next_lost = 0;
3486 int cpu_file = iter->cpu_file;
3487 u64 next_ts = 0, ts;
3493 * If we are in a per_cpu trace file, don't bother iterating over
3494 * all CPUs; peek at that CPU directly.
3496 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3497 if (ring_buffer_empty_cpu(buffer, cpu_file))
3499 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3501 *ent_cpu = cpu_file;
3506 for_each_tracing_cpu(cpu) {
3508 if (ring_buffer_empty_cpu(buffer, cpu))
3511 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3514 * Pick the entry with the smallest timestamp:
3516 if (ent && (!next || ts < next_ts)) {
3520 next_lost = lost_events;
3521 next_size = iter->ent_size;
3525 iter->ent_size = next_size;
3528 *ent_cpu = next_cpu;
3534 *missing_events = next_lost;
3539 #define STATIC_TEMP_BUF_SIZE 128
3540 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4);
3542 /* Find the next real entry, without updating the iterator itself */
3543 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3544 int *ent_cpu, u64 *ent_ts)
3546 /* __find_next_entry will reset ent_size */
3547 int ent_size = iter->ent_size;
3548 struct trace_entry *entry;
3551 * If called from ftrace_dump(), then the iter->temp buffer
3552 * will be the static_temp_buf and not created from kmalloc.
3553 * If the entry size is greater than the buffer, we can
3554 * not save it. Just return NULL in that case. This is only
3555 * used to add markers when two consecutive events' time
3556 * stamps have a large delta. See trace_print_lat_context()
3558 if (iter->temp == static_temp_buf &&
3559 STATIC_TEMP_BUF_SIZE < ent_size)
3563 * __find_next_entry() may call peek_next_entry(), which may
3564 * call ring_buffer_peek() and leave the contents of iter->ent
3565 * undefined. We need to copy iter->ent now.
3567 if (iter->ent && iter->ent != iter->temp) {
3568 if ((!iter->temp || iter->temp_size < iter->ent_size) &&
3569 !WARN_ON_ONCE(iter->temp == static_temp_buf)) {
3571 temp = kmalloc(iter->ent_size, GFP_KERNEL);
3576 iter->temp_size = iter->ent_size;
3578 memcpy(iter->temp, iter->ent, iter->ent_size);
3579 iter->ent = iter->temp;
3581 entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3582 /* Put back the original ent_size */
3583 iter->ent_size = ent_size;
3588 /* Find the next real entry, and increment the iterator to the next entry */
3589 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3591 iter->ent = __find_next_entry(iter, &iter->cpu,
3592 &iter->lost_events, &iter->ts);
3595 trace_iterator_increment(iter);
3597 return iter->ent ? iter : NULL;
3600 static void trace_consume(struct trace_iterator *iter)
3602 ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts,
3603 &iter->lost_events);
3606 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3608 struct trace_iterator *iter = m->private;
3612 WARN_ON_ONCE(iter->leftover);
3616 /* can't go backwards */
3621 ent = trace_find_next_entry_inc(iter);
3625 while (ent && iter->idx < i)
3626 ent = trace_find_next_entry_inc(iter);
3633 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3635 struct ring_buffer_iter *buf_iter;
3636 unsigned long entries = 0;
3639 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0;
3641 buf_iter = trace_buffer_iter(iter, cpu);
3645 ring_buffer_iter_reset(buf_iter);
3648 * We could have the case with the max latency tracers
3649 * that a reset never took place on a cpu. This is evident
3650 * by the timestamp being before the start of the buffer.
3652 while (ring_buffer_iter_peek(buf_iter, &ts)) {
3653 if (ts >= iter->array_buffer->time_start)
3656 ring_buffer_iter_advance(buf_iter);
3659 per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries;
3663 * The current tracer is copied to avoid taking a global lock all around.
3666 static void *s_start(struct seq_file *m, loff_t *pos)
3668 struct trace_iterator *iter = m->private;
3669 struct trace_array *tr = iter->tr;
3670 int cpu_file = iter->cpu_file;
3676 * copy the tracer to avoid using a global lock all around.
3677 * iter->trace is a copy of current_trace, the pointer to the
3678 * name may be used instead of a strcmp(), as iter->trace->name
3679 * will point to the same string as current_trace->name.
3681 mutex_lock(&trace_types_lock);
3682 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3683 *iter->trace = *tr->current_trace;
3684 mutex_unlock(&trace_types_lock);
3686 #ifdef CONFIG_TRACER_MAX_TRACE
3687 if (iter->snapshot && iter->trace->use_max_tr)
3688 return ERR_PTR(-EBUSY);
3691 if (!iter->snapshot)
3692 atomic_inc(&trace_record_taskinfo_disabled);
3694 if (*pos != iter->pos) {
3699 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3700 for_each_tracing_cpu(cpu)
3701 tracing_iter_reset(iter, cpu);
3703 tracing_iter_reset(iter, cpu_file);
3706 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3711 * If we overflowed the seq_file before, then we want
3712 * to just reuse the trace_seq buffer again.
3718 p = s_next(m, p, &l);
3722 trace_event_read_lock();
3723 trace_access_lock(cpu_file);
3727 static void s_stop(struct seq_file *m, void *p)
3729 struct trace_iterator *iter = m->private;
3731 #ifdef CONFIG_TRACER_MAX_TRACE
3732 if (iter->snapshot && iter->trace->use_max_tr)
3736 if (!iter->snapshot)
3737 atomic_dec(&trace_record_taskinfo_disabled);
3739 trace_access_unlock(iter->cpu_file);
3740 trace_event_read_unlock();
3744 get_total_entries_cpu(struct array_buffer *buf, unsigned long *total,
3745 unsigned long *entries, int cpu)
3747 unsigned long count;
3749 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3751 * If this buffer has skipped entries, then we hold all
3752 * entries for the trace and we need to ignore the
3753 * ones before the time stamp.
3755 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3756 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3757 /* total is the same as the entries */
3761 ring_buffer_overrun_cpu(buf->buffer, cpu);
3766 get_total_entries(struct array_buffer *buf,
3767 unsigned long *total, unsigned long *entries)
3775 for_each_tracing_cpu(cpu) {
3776 get_total_entries_cpu(buf, &t, &e, cpu);
3782 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu)
3784 unsigned long total, entries;
3789 get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu);
3794 unsigned long trace_total_entries(struct trace_array *tr)
3796 unsigned long total, entries;
3801 get_total_entries(&tr->array_buffer, &total, &entries);
3806 static void print_lat_help_header(struct seq_file *m)
3808 seq_puts(m, "# _------=> CPU# \n"
3809 "# / _-----=> irqs-off \n"
3810 "# | / _----=> need-resched \n"
3811 "# || / _---=> hardirq/softirq \n"
3812 "# ||| / _--=> preempt-depth \n"
3814 "# cmd pid ||||| time | caller \n"
3815 "# \\ / ||||| \\ | / \n");
3818 static void print_event_info(struct array_buffer *buf, struct seq_file *m)
3820 unsigned long total;
3821 unsigned long entries;
3823 get_total_entries(buf, &total, &entries);
3824 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3825 entries, total, num_online_cpus());
3829 static void print_func_help_header(struct array_buffer *buf, struct seq_file *m,
3832 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3834 print_event_info(buf, m);
3836 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : "");
3837 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3840 static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m,
3843 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3844 const char *space = " ";
3845 int prec = tgid ? 12 : 2;
3847 print_event_info(buf, m);
3849 seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
3850 seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
3851 seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
3852 seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
3853 seq_printf(m, "# %.*s||| / delay\n", prec, space);
3854 seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID ");
3855 seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | ");
3859 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3861 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3862 struct array_buffer *buf = iter->array_buffer;
3863 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3864 struct tracer *type = iter->trace;
3865 unsigned long entries;
3866 unsigned long total;
3867 const char *name = "preemption";
3871 get_total_entries(buf, &total, &entries);
3873 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3875 seq_puts(m, "# -----------------------------------"
3876 "---------------------------------\n");
3877 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3878 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3879 nsecs_to_usecs(data->saved_latency),
3883 #if defined(CONFIG_PREEMPT_NONE)
3885 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3887 #elif defined(CONFIG_PREEMPT)
3889 #elif defined(CONFIG_PREEMPT_RT)
3894 /* These are reserved for later use */
3897 seq_printf(m, " #P:%d)\n", num_online_cpus());
3901 seq_puts(m, "# -----------------\n");
3902 seq_printf(m, "# | task: %.16s-%d "
3903 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3904 data->comm, data->pid,
3905 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3906 data->policy, data->rt_priority);
3907 seq_puts(m, "# -----------------\n");
3909 if (data->critical_start) {
3910 seq_puts(m, "# => started at: ");
3911 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3912 trace_print_seq(m, &iter->seq);
3913 seq_puts(m, "\n# => ended at: ");
3914 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3915 trace_print_seq(m, &iter->seq);
3916 seq_puts(m, "\n#\n");
3922 static void test_cpu_buff_start(struct trace_iterator *iter)
3924 struct trace_seq *s = &iter->seq;
3925 struct trace_array *tr = iter->tr;
3927 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3930 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3933 if (cpumask_available(iter->started) &&
3934 cpumask_test_cpu(iter->cpu, iter->started))
3937 if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries)
3940 if (cpumask_available(iter->started))
3941 cpumask_set_cpu(iter->cpu, iter->started);
3943 /* Don't print started cpu buffer for the first entry of the trace */
3945 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3949 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3951 struct trace_array *tr = iter->tr;
3952 struct trace_seq *s = &iter->seq;
3953 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3954 struct trace_entry *entry;
3955 struct trace_event *event;
3959 test_cpu_buff_start(iter);
3961 event = ftrace_find_event(entry->type);
3963 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3964 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3965 trace_print_lat_context(iter);
3967 trace_print_context(iter);
3970 if (trace_seq_has_overflowed(s))
3971 return TRACE_TYPE_PARTIAL_LINE;
3974 return event->funcs->trace(iter, sym_flags, event);
3976 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3978 return trace_handle_return(s);
3981 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3983 struct trace_array *tr = iter->tr;
3984 struct trace_seq *s = &iter->seq;
3985 struct trace_entry *entry;
3986 struct trace_event *event;
3990 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3991 trace_seq_printf(s, "%d %d %llu ",
3992 entry->pid, iter->cpu, iter->ts);
3994 if (trace_seq_has_overflowed(s))
3995 return TRACE_TYPE_PARTIAL_LINE;
3997 event = ftrace_find_event(entry->type);
3999 return event->funcs->raw(iter, 0, event);
4001 trace_seq_printf(s, "%d ?\n", entry->type);
4003 return trace_handle_return(s);
4006 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
4008 struct trace_array *tr = iter->tr;
4009 struct trace_seq *s = &iter->seq;
4010 unsigned char newline = '\n';
4011 struct trace_entry *entry;
4012 struct trace_event *event;
4016 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4017 SEQ_PUT_HEX_FIELD(s, entry->pid);
4018 SEQ_PUT_HEX_FIELD(s, iter->cpu);
4019 SEQ_PUT_HEX_FIELD(s, iter->ts);
4020 if (trace_seq_has_overflowed(s))
4021 return TRACE_TYPE_PARTIAL_LINE;
4024 event = ftrace_find_event(entry->type);
4026 enum print_line_t ret = event->funcs->hex(iter, 0, event);
4027 if (ret != TRACE_TYPE_HANDLED)
4031 SEQ_PUT_FIELD(s, newline);
4033 return trace_handle_return(s);
4036 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
4038 struct trace_array *tr = iter->tr;
4039 struct trace_seq *s = &iter->seq;
4040 struct trace_entry *entry;
4041 struct trace_event *event;
4045 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
4046 SEQ_PUT_FIELD(s, entry->pid);
4047 SEQ_PUT_FIELD(s, iter->cpu);
4048 SEQ_PUT_FIELD(s, iter->ts);
4049 if (trace_seq_has_overflowed(s))
4050 return TRACE_TYPE_PARTIAL_LINE;
4053 event = ftrace_find_event(entry->type);
4054 return event ? event->funcs->binary(iter, 0, event) :
4058 int trace_empty(struct trace_iterator *iter)
4060 struct ring_buffer_iter *buf_iter;
4063 /* If we are looking at one CPU buffer, only check that one */
4064 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
4065 cpu = iter->cpu_file;
4066 buf_iter = trace_buffer_iter(iter, cpu);
4068 if (!ring_buffer_iter_empty(buf_iter))
4071 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4077 for_each_tracing_cpu(cpu) {
4078 buf_iter = trace_buffer_iter(iter, cpu);
4080 if (!ring_buffer_iter_empty(buf_iter))
4083 if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu))
4091 /* Called with trace_event_read_lock() held. */
4092 enum print_line_t print_trace_line(struct trace_iterator *iter)
4094 struct trace_array *tr = iter->tr;
4095 unsigned long trace_flags = tr->trace_flags;
4096 enum print_line_t ret;
4098 if (iter->lost_events) {
4099 if (iter->lost_events == (unsigned long)-1)
4100 trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
4103 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
4104 iter->cpu, iter->lost_events);
4105 if (trace_seq_has_overflowed(&iter->seq))
4106 return TRACE_TYPE_PARTIAL_LINE;
4109 if (iter->trace && iter->trace->print_line) {
4110 ret = iter->trace->print_line(iter);
4111 if (ret != TRACE_TYPE_UNHANDLED)
4115 if (iter->ent->type == TRACE_BPUTS &&
4116 trace_flags & TRACE_ITER_PRINTK &&
4117 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4118 return trace_print_bputs_msg_only(iter);
4120 if (iter->ent->type == TRACE_BPRINT &&
4121 trace_flags & TRACE_ITER_PRINTK &&
4122 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4123 return trace_print_bprintk_msg_only(iter);
4125 if (iter->ent->type == TRACE_PRINT &&
4126 trace_flags & TRACE_ITER_PRINTK &&
4127 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
4128 return trace_print_printk_msg_only(iter);
4130 if (trace_flags & TRACE_ITER_BIN)
4131 return print_bin_fmt(iter);
4133 if (trace_flags & TRACE_ITER_HEX)
4134 return print_hex_fmt(iter);
4136 if (trace_flags & TRACE_ITER_RAW)
4137 return print_raw_fmt(iter);
4139 return print_trace_fmt(iter);
4142 void trace_latency_header(struct seq_file *m)
4144 struct trace_iterator *iter = m->private;
4145 struct trace_array *tr = iter->tr;
4147 /* print nothing if the buffers are empty */
4148 if (trace_empty(iter))
4151 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
4152 print_trace_header(m, iter);
4154 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
4155 print_lat_help_header(m);
4158 void trace_default_header(struct seq_file *m)
4160 struct trace_iterator *iter = m->private;
4161 struct trace_array *tr = iter->tr;
4162 unsigned long trace_flags = tr->trace_flags;
4164 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
4167 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
4168 /* print nothing if the buffers are empty */
4169 if (trace_empty(iter))
4171 print_trace_header(m, iter);
4172 if (!(trace_flags & TRACE_ITER_VERBOSE))
4173 print_lat_help_header(m);
4175 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
4176 if (trace_flags & TRACE_ITER_IRQ_INFO)
4177 print_func_help_header_irq(iter->array_buffer,
4180 print_func_help_header(iter->array_buffer, m,
4186 static void test_ftrace_alive(struct seq_file *m)
4188 if (!ftrace_is_dead())
4190 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
4191 "# MAY BE MISSING FUNCTION EVENTS\n");
4194 #ifdef CONFIG_TRACER_MAX_TRACE
4195 static void show_snapshot_main_help(struct seq_file *m)
4197 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
4198 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4199 "# Takes a snapshot of the main buffer.\n"
4200 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
4201 "# (Doesn't have to be '2'; works with any number that\n"
4202 "# is not a '0' or '1')\n");
4205 static void show_snapshot_percpu_help(struct seq_file *m)
4207 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
4208 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4209 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
4210 "# Takes a snapshot of the main buffer for this cpu.\n");
4212 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
4213 "# Must use main snapshot file to allocate.\n");
4215 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
4216 "# (Doesn't have to be '2'; works with any number that\n"
4217 "# is not a '0' or '1')\n");
4220 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
4222 if (iter->tr->allocated_snapshot)
4223 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
4225 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
4227 seq_puts(m, "# Snapshot commands:\n");
4228 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
4229 show_snapshot_main_help(m);
4231 show_snapshot_percpu_help(m);
4234 /* Should never be called */
4235 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
4238 static int s_show(struct seq_file *m, void *v)
4240 struct trace_iterator *iter = v;
4243 if (iter->ent == NULL) {
4245 seq_printf(m, "# tracer: %s\n", iter->trace->name);
4247 test_ftrace_alive(m);
4249 if (iter->snapshot && trace_empty(iter))
4250 print_snapshot_help(m, iter);
4251 else if (iter->trace && iter->trace->print_header)
4252 iter->trace->print_header(m);
4254 trace_default_header(m);
4256 } else if (iter->leftover) {
4258 * If we filled the seq_file buffer earlier, we
4259 * want to just show it now.
4261 ret = trace_print_seq(m, &iter->seq);
4263 /* ret should this time be zero, but you never know */
4264 iter->leftover = ret;
4267 print_trace_line(iter);
4268 ret = trace_print_seq(m, &iter->seq);
4270 * If we overflow the seq_file buffer, then it will
4271 * ask us for this data again at start up.
4273 * ret is 0 if seq_file write succeeded.
4276 iter->leftover = ret;
4283 * Should be used after trace_array_get(), trace_types_lock
4284 * ensures that i_cdev was already initialized.
4286 static inline int tracing_get_cpu(struct inode *inode)
4288 if (inode->i_cdev) /* See trace_create_cpu_file() */
4289 return (long)inode->i_cdev - 1;
4290 return RING_BUFFER_ALL_CPUS;
4293 static const struct seq_operations tracer_seq_ops = {
4300 static struct trace_iterator *
4301 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
4303 struct trace_array *tr = inode->i_private;
4304 struct trace_iterator *iter;
4307 if (tracing_disabled)
4308 return ERR_PTR(-ENODEV);
4310 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
4312 return ERR_PTR(-ENOMEM);
4314 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
4316 if (!iter->buffer_iter)
4320 * trace_find_next_entry() may need to save off iter->ent.
4321 * It will place it into the iter->temp buffer. As most
4322 * events are less than 128, allocate a buffer of that size.
4323 * If one is greater, then trace_find_next_entry() will
4324 * allocate a new buffer to adjust for the bigger iter->ent.
4325 * It's not critical if it fails to get allocated here.
4327 iter->temp = kmalloc(128, GFP_KERNEL);
4329 iter->temp_size = 128;
4332 * We make a copy of the current tracer to avoid concurrent
4333 * changes on it while we are reading.
4335 mutex_lock(&trace_types_lock);
4336 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
4340 *iter->trace = *tr->current_trace;
4342 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
4347 #ifdef CONFIG_TRACER_MAX_TRACE
4348 /* Currently only the top directory has a snapshot */
4349 if (tr->current_trace->print_max || snapshot)
4350 iter->array_buffer = &tr->max_buffer;
4353 iter->array_buffer = &tr->array_buffer;
4354 iter->snapshot = snapshot;
4356 iter->cpu_file = tracing_get_cpu(inode);
4357 mutex_init(&iter->mutex);
4359 /* Notify the tracer early; before we stop tracing. */
4360 if (iter->trace->open)
4361 iter->trace->open(iter);
4363 /* Annotate start of buffers if we had overruns */
4364 if (ring_buffer_overruns(iter->array_buffer->buffer))
4365 iter->iter_flags |= TRACE_FILE_ANNOTATE;
4367 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
4368 if (trace_clocks[tr->clock_id].in_ns)
4369 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4372 * If pause-on-trace is enabled, then stop the trace while
4373 * dumping, unless this is the "snapshot" file
4375 if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE))
4376 tracing_stop_tr(tr);
4378 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
4379 for_each_tracing_cpu(cpu) {
4380 iter->buffer_iter[cpu] =
4381 ring_buffer_read_prepare(iter->array_buffer->buffer,
4384 ring_buffer_read_prepare_sync();
4385 for_each_tracing_cpu(cpu) {
4386 ring_buffer_read_start(iter->buffer_iter[cpu]);
4387 tracing_iter_reset(iter, cpu);
4390 cpu = iter->cpu_file;
4391 iter->buffer_iter[cpu] =
4392 ring_buffer_read_prepare(iter->array_buffer->buffer,
4394 ring_buffer_read_prepare_sync();
4395 ring_buffer_read_start(iter->buffer_iter[cpu]);
4396 tracing_iter_reset(iter, cpu);
4399 mutex_unlock(&trace_types_lock);
4404 mutex_unlock(&trace_types_lock);
4407 kfree(iter->buffer_iter);
4409 seq_release_private(inode, file);
4410 return ERR_PTR(-ENOMEM);
4413 int tracing_open_generic(struct inode *inode, struct file *filp)
4417 ret = tracing_check_open_get_tr(NULL);
4421 filp->private_data = inode->i_private;
4425 bool tracing_is_disabled(void)
4427 return (tracing_disabled) ? true: false;
4431 * Open and update trace_array ref count.
4432 * Must have the current trace_array passed to it.
4434 int tracing_open_generic_tr(struct inode *inode, struct file *filp)
4436 struct trace_array *tr = inode->i_private;
4439 ret = tracing_check_open_get_tr(tr);
4443 filp->private_data = inode->i_private;
4448 static int tracing_release(struct inode *inode, struct file *file)
4450 struct trace_array *tr = inode->i_private;
4451 struct seq_file *m = file->private_data;
4452 struct trace_iterator *iter;
4455 if (!(file->f_mode & FMODE_READ)) {
4456 trace_array_put(tr);
4460 /* Writes do not use seq_file */
4462 mutex_lock(&trace_types_lock);
4464 for_each_tracing_cpu(cpu) {
4465 if (iter->buffer_iter[cpu])
4466 ring_buffer_read_finish(iter->buffer_iter[cpu]);
4469 if (iter->trace && iter->trace->close)
4470 iter->trace->close(iter);
4472 if (!iter->snapshot && tr->stop_count)
4473 /* reenable tracing if it was previously enabled */
4474 tracing_start_tr(tr);
4476 __trace_array_put(tr);
4478 mutex_unlock(&trace_types_lock);
4480 mutex_destroy(&iter->mutex);
4481 free_cpumask_var(iter->started);
4484 kfree(iter->buffer_iter);
4485 seq_release_private(inode, file);
4490 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4492 struct trace_array *tr = inode->i_private;
4494 trace_array_put(tr);
4498 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4500 struct trace_array *tr = inode->i_private;
4502 trace_array_put(tr);
4504 return single_release(inode, file);
4507 static int tracing_open(struct inode *inode, struct file *file)
4509 struct trace_array *tr = inode->i_private;
4510 struct trace_iterator *iter;
4513 ret = tracing_check_open_get_tr(tr);
4517 /* If this file was open for write, then erase contents */
4518 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4519 int cpu = tracing_get_cpu(inode);
4520 struct array_buffer *trace_buf = &tr->array_buffer;
4522 #ifdef CONFIG_TRACER_MAX_TRACE
4523 if (tr->current_trace->print_max)
4524 trace_buf = &tr->max_buffer;
4527 if (cpu == RING_BUFFER_ALL_CPUS)
4528 tracing_reset_online_cpus(trace_buf);
4530 tracing_reset_cpu(trace_buf, cpu);
4533 if (file->f_mode & FMODE_READ) {
4534 iter = __tracing_open(inode, file, false);
4536 ret = PTR_ERR(iter);
4537 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4538 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4542 trace_array_put(tr);
4548 * Some tracers are not suitable for instance buffers.
4549 * A tracer is always available for the global array (toplevel)
4550 * or if it explicitly states that it is.
4553 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4555 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4558 /* Find the next tracer that this trace array may use */
4559 static struct tracer *
4560 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4562 while (t && !trace_ok_for_array(t, tr))
4569 t_next(struct seq_file *m, void *v, loff_t *pos)
4571 struct trace_array *tr = m->private;
4572 struct tracer *t = v;
4577 t = get_tracer_for_array(tr, t->next);
4582 static void *t_start(struct seq_file *m, loff_t *pos)
4584 struct trace_array *tr = m->private;
4588 mutex_lock(&trace_types_lock);
4590 t = get_tracer_for_array(tr, trace_types);
4591 for (; t && l < *pos; t = t_next(m, t, &l))
4597 static void t_stop(struct seq_file *m, void *p)
4599 mutex_unlock(&trace_types_lock);
4602 static int t_show(struct seq_file *m, void *v)
4604 struct tracer *t = v;
4609 seq_puts(m, t->name);
4618 static const struct seq_operations show_traces_seq_ops = {
4625 static int show_traces_open(struct inode *inode, struct file *file)
4627 struct trace_array *tr = inode->i_private;
4631 ret = tracing_check_open_get_tr(tr);
4635 ret = seq_open(file, &show_traces_seq_ops);
4637 trace_array_put(tr);
4641 m = file->private_data;
4647 static int show_traces_release(struct inode *inode, struct file *file)
4649 struct trace_array *tr = inode->i_private;
4651 trace_array_put(tr);
4652 return seq_release(inode, file);
4656 tracing_write_stub(struct file *filp, const char __user *ubuf,
4657 size_t count, loff_t *ppos)
4662 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4666 if (file->f_mode & FMODE_READ)
4667 ret = seq_lseek(file, offset, whence);
4669 file->f_pos = ret = 0;
4674 static const struct file_operations tracing_fops = {
4675 .open = tracing_open,
4677 .write = tracing_write_stub,
4678 .llseek = tracing_lseek,
4679 .release = tracing_release,
4682 static const struct file_operations show_traces_fops = {
4683 .open = show_traces_open,
4685 .llseek = seq_lseek,
4686 .release = show_traces_release,
4690 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4691 size_t count, loff_t *ppos)
4693 struct trace_array *tr = file_inode(filp)->i_private;
4697 len = snprintf(NULL, 0, "%*pb\n",
4698 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4699 mask_str = kmalloc(len, GFP_KERNEL);
4703 len = snprintf(mask_str, len, "%*pb\n",
4704 cpumask_pr_args(tr->tracing_cpumask));
4709 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4717 int tracing_set_cpumask(struct trace_array *tr,
4718 cpumask_var_t tracing_cpumask_new)
4725 local_irq_disable();
4726 arch_spin_lock(&tr->max_lock);
4727 for_each_tracing_cpu(cpu) {
4729 * Increase/decrease the disabled counter if we are
4730 * about to flip a bit in the cpumask:
4732 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4733 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4734 atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4735 ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu);
4737 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4738 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4739 atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled);
4740 ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu);
4743 arch_spin_unlock(&tr->max_lock);
4746 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4752 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4753 size_t count, loff_t *ppos)
4755 struct trace_array *tr = file_inode(filp)->i_private;
4756 cpumask_var_t tracing_cpumask_new;
4759 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4762 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4766 err = tracing_set_cpumask(tr, tracing_cpumask_new);
4770 free_cpumask_var(tracing_cpumask_new);
4775 free_cpumask_var(tracing_cpumask_new);
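/*
 * Illustrative sketch (editor's addition): an in-kernel user (e.g. boot-time
 * tracing) can restrict an instance to one CPU with tracing_set_cpumask().
 * The function name and error handling are assumptions for illustration.
 */
static int example_trace_only_cpu(struct trace_array *tr, int cpu)
{
	cpumask_var_t new_mask;
	int err;

	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(new_mask);
	cpumask_set_cpu(cpu, new_mask);

	err = tracing_set_cpumask(tr, new_mask);

	free_cpumask_var(new_mask);
	return err;
}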
4780 static const struct file_operations tracing_cpumask_fops = {
4781 .open = tracing_open_generic_tr,
4782 .read = tracing_cpumask_read,
4783 .write = tracing_cpumask_write,
4784 .release = tracing_release_generic_tr,
4785 .llseek = generic_file_llseek,
4788 static int tracing_trace_options_show(struct seq_file *m, void *v)
4790 struct tracer_opt *trace_opts;
4791 struct trace_array *tr = m->private;
4795 mutex_lock(&trace_types_lock);
4796 tracer_flags = tr->current_trace->flags->val;
4797 trace_opts = tr->current_trace->flags->opts;
4799 for (i = 0; trace_options[i]; i++) {
4800 if (tr->trace_flags & (1 << i))
4801 seq_printf(m, "%s\n", trace_options[i]);
4803 seq_printf(m, "no%s\n", trace_options[i]);
4806 for (i = 0; trace_opts[i].name; i++) {
4807 if (tracer_flags & trace_opts[i].bit)
4808 seq_printf(m, "%s\n", trace_opts[i].name);
4810 seq_printf(m, "no%s\n", trace_opts[i].name);
4812 mutex_unlock(&trace_types_lock);
4817 static int __set_tracer_option(struct trace_array *tr,
4818 struct tracer_flags *tracer_flags,
4819 struct tracer_opt *opts, int neg)
4821 struct tracer *trace = tracer_flags->trace;
4824 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4829 tracer_flags->val &= ~opts->bit;
4831 tracer_flags->val |= opts->bit;
4835 /* Try to assign a tracer specific option */
4836 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4838 struct tracer *trace = tr->current_trace;
4839 struct tracer_flags *tracer_flags = trace->flags;
4840 struct tracer_opt *opts = NULL;
4843 for (i = 0; tracer_flags->opts[i].name; i++) {
4844 opts = &tracer_flags->opts[i];
4846 if (strcmp(cmp, opts->name) == 0)
4847 return __set_tracer_option(tr, trace->flags, opts, neg);
4853 /* Some tracers require overwrite to stay enabled */
4854 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4856 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4862 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4864 if ((mask == TRACE_ITER_RECORD_TGID) ||
4865 (mask == TRACE_ITER_RECORD_CMD))
4866 lockdep_assert_held(&event_mutex);
4868 /* do nothing if flag is already set */
4869 if (!!(tr->trace_flags & mask) == !!enabled)
4872 /* Give the tracer a chance to approve the change */
4873 if (tr->current_trace->flag_changed)
4874 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4878 tr->trace_flags |= mask;
4880 tr->trace_flags &= ~mask;
4882 if (mask == TRACE_ITER_RECORD_CMD)
4883 trace_event_enable_cmd_record(enabled);
4885 if (mask == TRACE_ITER_RECORD_TGID) {
4887 tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
4891 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4895 trace_event_enable_tgid_record(enabled);
4898 if (mask == TRACE_ITER_EVENT_FORK)
4899 trace_event_follow_fork(tr, enabled);
4901 if (mask == TRACE_ITER_FUNC_FORK)
4902 ftrace_pid_follow_fork(tr, enabled);
4904 if (mask == TRACE_ITER_OVERWRITE) {
4905 ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled);
4906 #ifdef CONFIG_TRACER_MAX_TRACE
4907 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4911 if (mask == TRACE_ITER_PRINTK) {
4912 trace_printk_start_stop_comm(enabled);
4913 trace_printk_control(enabled);
4919 int trace_set_options(struct trace_array *tr, char *option)
4924 size_t orig_len = strlen(option);
4927 cmp = strstrip(option);
4929 len = str_has_prefix(cmp, "no");
4935 mutex_lock(&event_mutex);
4936 mutex_lock(&trace_types_lock);
4938 ret = match_string(trace_options, -1, cmp);
4939 /* If no option could be set, test the specific tracer options */
4941 ret = set_tracer_option(tr, cmp, neg);
4943 ret = set_tracer_flag(tr, 1 << ret, !neg);
4945 mutex_unlock(&trace_types_lock);
4946 mutex_unlock(&event_mutex);
4949 * If the first trailing whitespace is replaced with '\0' by strstrip,
4950 * turn it back into a space.
4952 if (orig_len > strlen(option))
4953 option[strlen(option)] = ' ';
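/*
 * Illustrative use of the syntax handled above: an option is enabled by
 * writing its name to trace_options and disabled by prefixing it with
 * "no"; the same comma-separated form is what apply_trace_boot_options()
 * below receives from the trace_options= kernel parameter (the option
 * names here are examples):
 *
 *	# echo sym-offset > /sys/kernel/tracing/trace_options
 *	# echo noprint-parent > /sys/kernel/tracing/trace_options
 *
 *	trace_options=sym-offset,noprint-parent
 */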
4958 static void __init apply_trace_boot_options(void)
4960 char *buf = trace_boot_options_buf;
4964 option = strsep(&buf, ",");
4970 trace_set_options(&global_trace, option);
4972 /* Put back the comma to allow this to be called again */
4979 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4980 size_t cnt, loff_t *ppos)
4982 struct seq_file *m = filp->private_data;
4983 struct trace_array *tr = m->private;
4987 if (cnt >= sizeof(buf))
4990 if (copy_from_user(buf, ubuf, cnt))
4995 ret = trace_set_options(tr, buf);
5004 static int tracing_trace_options_open(struct inode *inode, struct file *file)
5006 struct trace_array *tr = inode->i_private;
5009 ret = tracing_check_open_get_tr(tr);
5013 ret = single_open(file, tracing_trace_options_show, inode->i_private);
5015 trace_array_put(tr);
5020 static const struct file_operations tracing_iter_fops = {
5021 .open = tracing_trace_options_open,
5023 .llseek = seq_lseek,
5024 .release = tracing_single_release_tr,
5025 .write = tracing_trace_options_write,
5028 static const char readme_msg[] =
5029 "tracing mini-HOWTO:\n\n"
5030 "# echo 0 > tracing_on : quick way to disable tracing\n"
5031 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
5032 " Important files:\n"
5033 " trace\t\t\t- The static contents of the buffer\n"
5034 "\t\t\t To clear the buffer write into this file: echo > trace\n"
5035 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
5036 " current_tracer\t- function and latency tracers\n"
5037 " available_tracers\t- list of configured tracers for current_tracer\n"
5038 " error_log\t- error log for failed commands (that support it)\n"
5039 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
5040 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
5041 " trace_clock\t\t- change the clock used to order events\n"
5042 " local: Per cpu clock but may not be synced across CPUs\n"
5043 " global: Synced across CPUs but slows tracing down.\n"
5044 " counter: Not a clock, but just an increment\n"
5045 " uptime: Jiffy counter from time of boot\n"
5046 " perf: Same clock that perf events use\n"
5047 #ifdef CONFIG_X86_64
5048 " x86-tsc: TSC cycle counter\n"
5050 "\n timestamp_mode\t- view the mode used to timestamp events\n"
5051 " delta: Delta difference against a buffer-wide timestamp\n"
5052 " absolute: Absolute (standalone) timestamp\n"
5053 "\n trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
5054 "\n trace_marker_raw\t\t- Writes into this file are written as binary data into the kernel buffer\n"
5055 " tracing_cpumask\t- Limit which CPUs to trace\n"
5056 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
5057 "\t\t\t Remove sub-buffer with rmdir\n"
5058 " trace_options\t\t- Set format or modify how tracing happens\n"
5059 "\t\t\t Disable an option by prefixing 'no' to the\n"
5060 "\t\t\t option name\n"
5061 " saved_cmdlines_size\t- echo the number of comm-pid entries to store in here\n"
5062 #ifdef CONFIG_DYNAMIC_FTRACE
5063 "\n available_filter_functions - list of functions that can be filtered on\n"
5064 " set_ftrace_filter\t- echo function name in here to only trace these\n"
5065 "\t\t\t functions\n"
5066 "\t accepts: func_full_name or glob-matching-pattern\n"
5067 "\t modules: Can select a group via module\n"
5068 "\t Format: :mod:<module-name>\n"
5069 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
5070 "\t triggers: a command to perform when function is hit\n"
5071 "\t Format: <function>:<trigger>[:count]\n"
5072 "\t trigger: traceon, traceoff\n"
5073 "\t\t enable_event:<system>:<event>\n"
5074 "\t\t disable_event:<system>:<event>\n"
5075 #ifdef CONFIG_STACKTRACE
5078 #ifdef CONFIG_TRACER_SNAPSHOT
5083 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
5084 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
5085 "\t The first one will disable tracing every time do_fault is hit\n"
5086 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
5087 "\t The first time do_trap is hit and it disables tracing, the\n"
5088 "\t counter will decrement to 2. If tracing is already disabled,\n"
5089 "\t the counter will not decrement. It only decrements when the\n"
5090 "\t trigger did work\n"
5091 "\t To remove trigger without count:\n"
5092 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
5093 "\t To remove trigger with a count:\n"
5094 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
5095 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
5096 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
5097 "\t modules: Can select a group via module command :mod:\n"
5098 "\t Does not accept triggers\n"
5099 #endif /* CONFIG_DYNAMIC_FTRACE */
5100 #ifdef CONFIG_FUNCTION_TRACER
5101 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
5103 " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n"
5106 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5107 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
5108 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
5109 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
5111 #ifdef CONFIG_TRACER_SNAPSHOT
5112 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
5113 "\t\t\t snapshot buffer. Read the contents for more\n"
5114 "\t\t\t information\n"
5116 #ifdef CONFIG_STACK_TRACER
5117 " stack_trace\t\t- Shows the max stack trace when active\n"
5118 " stack_max_size\t- Shows current max stack size that was traced\n"
5119 "\t\t\t Write into this file to reset the max size (trigger a\n"
5120 "\t\t\t new trace)\n"
5121 #ifdef CONFIG_DYNAMIC_FTRACE
5122 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
5125 #endif /* CONFIG_STACK_TRACER */
5126 #ifdef CONFIG_DYNAMIC_EVENTS
5127 " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n"
5128 "\t\t\t Write into this file to define/undefine new trace events.\n"
5130 #ifdef CONFIG_KPROBE_EVENTS
5131 " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n"
5132 "\t\t\t Write into this file to define/undefine new trace events.\n"
5134 #ifdef CONFIG_UPROBE_EVENTS
5135 " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n"
5136 "\t\t\t Write into this file to define/undefine new trace events.\n"
5138 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
5139 "\t accepts: event-definitions (one definition per line)\n"
5140 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
5141 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
5142 #ifdef CONFIG_HIST_TRIGGERS
5143 "\t s:[synthetic/]<event> <field> [<field>]\n"
5145 "\t -:[<group>/]<event>\n"
5146 #ifdef CONFIG_KPROBE_EVENTS
5147 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
5148 "\t place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n"
5150 #ifdef CONFIG_UPROBE_EVENTS
5151 "\t place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n"
5153 "\t args: <name>=fetcharg[:type]\n"
5154 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
5155 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
5156 "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
5158 "\t $stack<index>, $stack, $retval, $comm,\n"
5160 "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
5161 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
5162 "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
5163 "\t <type>\\[<array-size>\\]\n"
5164 #ifdef CONFIG_HIST_TRIGGERS
5165 "\t field: <stype> <name>;\n"
5166 "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n"
5167 "\t [unsigned] char/int/long\n"
5170 " events/\t\t- Directory containing all trace event subsystems:\n"
5171 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
5172 " events/<system>/\t- Directory containing all trace events for <system>:\n"
5173 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
5175 " filter\t\t- If set, only events passing filter are traced\n"
5176 " events/<system>/<event>/\t- Directory containing control files for\n"
5178 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
5179 " filter\t\t- If set, only events passing filter are traced\n"
5180 " trigger\t\t- If set, a command to perform when event is hit\n"
5181 "\t Format: <trigger>[:count][if <filter>]\n"
5182 "\t trigger: traceon, traceoff\n"
5183 "\t enable_event:<system>:<event>\n"
5184 "\t disable_event:<system>:<event>\n"
5185 #ifdef CONFIG_HIST_TRIGGERS
5186 "\t enable_hist:<system>:<event>\n"
5187 "\t disable_hist:<system>:<event>\n"
5189 #ifdef CONFIG_STACKTRACE
5192 #ifdef CONFIG_TRACER_SNAPSHOT
5195 #ifdef CONFIG_HIST_TRIGGERS
5196 "\t\t hist (see below)\n"
5198 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
5199 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
5200 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
5201 "\t events/block/block_unplug/trigger\n"
5202 "\t The first disables tracing every time block_unplug is hit.\n"
5203 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
5204 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
5205 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
5206 "\t Like function triggers, the counter is only decremented if it\n"
5207 "\t enabled or disabled tracing.\n"
5208 "\t To remove a trigger without a count:\n"
5209 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
5210 "\t To remove a trigger with a count:\n"
5211 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
5212 "\t Filters can be ignored when removing a trigger.\n"
5213 #ifdef CONFIG_HIST_TRIGGERS
5214 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
5215 "\t Format: hist:keys=<field1[,field2,...]>\n"
5216 "\t [:values=<field1[,field2,...]>]\n"
5217 "\t [:sort=<field1[,field2,...]>]\n"
5218 "\t [:size=#entries]\n"
5219 "\t [:pause][:continue][:clear]\n"
5220 "\t [:name=histname1]\n"
5221 "\t [:<handler>.<action>]\n"
5222 "\t [if <filter>]\n\n"
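/*
 * An illustrative trigger using the format above (the event and field
 * names are examples only; any event and numeric field would do):
 *
 *	# echo 'hist:keys=common_pid:values=hitcount:sort=hitcount.descending' \
 *		> events/sched/sched_switch/trigger
 *	# cat events/sched/sched_switch/hist
 */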
5223 "\t When a matching event is hit, an entry is added to a hash\n"
5224 "\t table using the key(s) and value(s) named, and the value of a\n"
5225 "\t sum called 'hitcount' is incremented. Keys and values\n"
5226 "\t correspond to fields in the event's format description. Keys\n"
5227 "\t can be any field, or the special string 'stacktrace'.\n"
5228 "\t Compound keys consisting of up to two fields can be specified\n"
5229 "\t by the 'keys' keyword. Values must correspond to numeric\n"
5230 "\t fields. Sort keys consisting of up to two fields can be\n"
5231 "\t specified using the 'sort' keyword. The sort direction can\n"
5232 "\t be modified by appending '.descending' or '.ascending' to a\n"
5233 "\t sort field. The 'size' parameter can be used to specify more\n"
5234 "\t or fewer than the default 2048 entries for the hashtable size.\n"
5235 "\t If a hist trigger is given a name using the 'name' parameter,\n"
5236 "\t its histogram data will be shared with other triggers of the\n"
5237 "\t same name, and trigger hits will update this common data.\n\n"
5238 "\t Reading the 'hist' file for the event will dump the hash\n"
5239 "\t table in its entirety to stdout. If there are multiple hist\n"
5240 "\t triggers attached to an event, there will be a table for each\n"
5241 "\t trigger in the output. The table displayed for a named\n"
5242 "\t trigger will be the same as any other instance having the\n"
5243 "\t same name. The default format used to display a given field\n"
5244 "\t can be modified by appending any of the following modifiers\n"
5245 "\t to the field name, as applicable:\n\n"
5246 "\t .hex display a number as a hex value\n"
5247 "\t .sym display an address as a symbol\n"
5248 "\t .sym-offset display an address as a symbol and offset\n"
5249 "\t .execname display a common_pid as a program name\n"
5250 "\t .syscall display a syscall id as a syscall name\n"
5251 "\t .log2 display log2 value rather than raw number\n"
5252 "\t .usecs display a common_timestamp in microseconds\n\n"
5253 "\t The 'pause' parameter can be used to pause an existing hist\n"
5254 "\t trigger or to start a hist trigger but not log any events\n"
5255 "\t until told to do so. 'continue' can be used to start or\n"
5256 "\t restart a paused hist trigger.\n\n"
5257 "\t The 'clear' parameter will clear the contents of a running\n"
5258 "\t hist trigger and leave its current paused/active state\n"
5260 "\t The enable_hist and disable_hist triggers can be used to\n"
5261 "\t have one event conditionally start and stop another event's\n"
5262 "\t already-attached hist trigger. The syntax is analogous to\n"
5263 "\t the enable_event and disable_event triggers.\n\n"
5264 "\t Hist trigger handlers and actions are executed whenever a\n"
5265 "\t histogram entry is added or updated. They take the form:\n\n"
5266 "\t <handler>.<action>\n\n"
5267 "\t The available handlers are:\n\n"
5268 "\t onmatch(matching.event) - invoke on addition or update\n"
5269 "\t onmax(var) - invoke if var exceeds current max\n"
5270 "\t onchange(var) - invoke action if var changes\n\n"
5271 "\t The available actions are:\n\n"
5272 "\t trace(<synthetic_event>,param list) - generate synthetic event\n"
5273 "\t save(field,...) - save current event fields\n"
5274 #ifdef CONFIG_TRACER_SNAPSHOT
5275 "\t snapshot() - snapshot the trace buffer\n\n"
5277 #ifdef CONFIG_SYNTH_EVENTS
5278 " events/synthetic_events\t- Create/append/remove/show synthetic events\n"
5279 "\t Write into this file to define/undefine new synthetic events.\n"
5280 "\t example: echo 'myevent u64 lat; char name[]' >> synthetic_events\n"
5286 tracing_readme_read(struct file *filp, char __user *ubuf,
5287 size_t cnt, loff_t *ppos)
5289 return simple_read_from_buffer(ubuf, cnt, ppos,
5290 readme_msg, strlen(readme_msg));
5293 static const struct file_operations tracing_readme_fops = {
5294 .open = tracing_open_generic,
5295 .read = tracing_readme_read,
5296 .llseek = generic_file_llseek,
5299 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
5303 if (*pos || m->count)
5308 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
5309 if (trace_find_tgid(*ptr))
5316 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
5326 v = saved_tgids_next(m, v, &l);
5334 static void saved_tgids_stop(struct seq_file *m, void *v)
5338 static int saved_tgids_show(struct seq_file *m, void *v)
5340 int pid = (int *)v - tgid_map;
5342 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
5346 static const struct seq_operations tracing_saved_tgids_seq_ops = {
5347 .start = saved_tgids_start,
5348 .stop = saved_tgids_stop,
5349 .next = saved_tgids_next,
5350 .show = saved_tgids_show,
5353 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
5357 ret = tracing_check_open_get_tr(NULL);
5361 return seq_open(filp, &tracing_saved_tgids_seq_ops);
5365 static const struct file_operations tracing_saved_tgids_fops = {
5366 .open = tracing_saved_tgids_open,
5368 .llseek = seq_lseek,
5369 .release = seq_release,
5372 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
5374 unsigned int *ptr = v;
5376 if (*pos || m->count)
5381 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
5383 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
5392 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
5398 arch_spin_lock(&trace_cmdline_lock);
5400 v = &savedcmd->map_cmdline_to_pid[0];
5402 v = saved_cmdlines_next(m, v, &l);
5410 static void saved_cmdlines_stop(struct seq_file *m, void *v)
5412 arch_spin_unlock(&trace_cmdline_lock);
5416 static int saved_cmdlines_show(struct seq_file *m, void *v)
5418 char buf[TASK_COMM_LEN];
5419 unsigned int *pid = v;
5421 __trace_find_cmdline(*pid, buf);
5422 seq_printf(m, "%d %s\n", *pid, buf);
5426 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
5427 .start = saved_cmdlines_start,
5428 .next = saved_cmdlines_next,
5429 .stop = saved_cmdlines_stop,
5430 .show = saved_cmdlines_show,
5433 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
5437 ret = tracing_check_open_get_tr(NULL);
5441 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
5444 static const struct file_operations tracing_saved_cmdlines_fops = {
5445 .open = tracing_saved_cmdlines_open,
5447 .llseek = seq_lseek,
5448 .release = seq_release,
5452 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
5453 size_t cnt, loff_t *ppos)
5458 arch_spin_lock(&trace_cmdline_lock);
5459 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
5460 arch_spin_unlock(&trace_cmdline_lock);
5462 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5465 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
5467 kfree(s->saved_cmdlines);
5468 kfree(s->map_cmdline_to_pid);
5472 static int tracing_resize_saved_cmdlines(unsigned int val)
5474 struct saved_cmdlines_buffer *s, *savedcmd_temp;
5476 s = kmalloc(sizeof(*s), GFP_KERNEL);
5480 if (allocate_cmdlines_buffer(val, s) < 0) {
5485 arch_spin_lock(&trace_cmdline_lock);
5486 savedcmd_temp = savedcmd;
5488 arch_spin_unlock(&trace_cmdline_lock);
5489 free_saved_cmdlines_buffer(savedcmd_temp);
5495 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5501 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5505 /* must have at least 1 entry and at most PID_MAX_DEFAULT */
5506 if (!val || val > PID_MAX_DEFAULT)
5509 ret = tracing_resize_saved_cmdlines((unsigned int)val);
5518 static const struct file_operations tracing_saved_cmdlines_size_fops = {
5519 .open = tracing_open_generic,
5520 .read = tracing_saved_cmdlines_size_read,
5521 .write = tracing_saved_cmdlines_size_write,
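/*
 * Illustrative use of the file above: the number of saved comm<->pid
 * entries can be read back and resized within the 1..PID_MAX_DEFAULT
 * range enforced by the write handler (128 is the usual default):
 *
 *	# cat saved_cmdlines_size
 *	128
 *	# echo 1024 > saved_cmdlines_size
 */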
5524 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
5525 static union trace_eval_map_item *
5526 update_eval_map(union trace_eval_map_item *ptr)
5528 if (!ptr->map.eval_string) {
5529 if (ptr->tail.next) {
5530 ptr = ptr->tail.next;
5531 /* Set ptr to the next real item (skip head) */
5539 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
5541 union trace_eval_map_item *ptr = v;
5544 * Paranoid! If ptr points to end, we don't want to increment past it.
5545 * This really should never happen.
5548 ptr = update_eval_map(ptr);
5549 if (WARN_ON_ONCE(!ptr))
5553 ptr = update_eval_map(ptr);
5558 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5560 union trace_eval_map_item *v;
5563 mutex_lock(&trace_eval_mutex);
5565 v = trace_eval_maps;
5569 while (v && l < *pos) {
5570 v = eval_map_next(m, v, &l);
5576 static void eval_map_stop(struct seq_file *m, void *v)
5578 mutex_unlock(&trace_eval_mutex);
5581 static int eval_map_show(struct seq_file *m, void *v)
5583 union trace_eval_map_item *ptr = v;
5585 seq_printf(m, "%s %ld (%s)\n",
5586 ptr->map.eval_string, ptr->map.eval_value,
5592 static const struct seq_operations tracing_eval_map_seq_ops = {
5593 .start = eval_map_start,
5594 .next = eval_map_next,
5595 .stop = eval_map_stop,
5596 .show = eval_map_show,
5599 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5603 ret = tracing_check_open_get_tr(NULL);
5607 return seq_open(filp, &tracing_eval_map_seq_ops);
5610 static const struct file_operations tracing_eval_map_fops = {
5611 .open = tracing_eval_map_open,
5613 .llseek = seq_lseek,
5614 .release = seq_release,
5617 static inline union trace_eval_map_item *
5618 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5620 /* Return tail of array given the head */
5621 return ptr + ptr->head.length + 1;
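/*
 * Rough sketch of the chunk layout this relies on (built just below in
 * trace_insert_eval_map_file()): each chunk is an array of
 * union trace_eval_map_item holding a head entry, the maps themselves
 * and a tail entry that chains to the next chunk:
 *
 *	[0]		head (head.mod, head.length = len)
 *	[1 .. len]	map entries copied from the module
 *	[len + 1]	tail (tail.next -> next chunk, or zeroed if last)
 *
 * hence the tail of a chunk is found at ptr + ptr->head.length + 1.
 */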
5625 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5628 struct trace_eval_map **stop;
5629 struct trace_eval_map **map;
5630 union trace_eval_map_item *map_array;
5631 union trace_eval_map_item *ptr;
5636 * The trace_eval_maps contains the map plus a head and tail item,
5637 * where the head holds the module and length of array, and the
5638 * tail holds a pointer to the next list.
5640 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5642 pr_warn("Unable to allocate trace eval mapping\n");
5646 mutex_lock(&trace_eval_mutex);
5648 if (!trace_eval_maps)
5649 trace_eval_maps = map_array;
5651 ptr = trace_eval_maps;
5653 ptr = trace_eval_jmp_to_tail(ptr);
5654 if (!ptr->tail.next)
5656 ptr = ptr->tail.next;
5659 ptr->tail.next = map_array;
5661 map_array->head.mod = mod;
5662 map_array->head.length = len;
5665 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5666 map_array->map = **map;
5669 memset(map_array, 0, sizeof(*map_array));
5671 mutex_unlock(&trace_eval_mutex);
5674 static void trace_create_eval_file(struct dentry *d_tracer)
5676 trace_create_file("eval_map", 0444, d_tracer,
5677 NULL, &tracing_eval_map_fops);
5680 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5681 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5682 static inline void trace_insert_eval_map_file(struct module *mod,
5683 struct trace_eval_map **start, int len) { }
5684 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5686 static void trace_insert_eval_map(struct module *mod,
5687 struct trace_eval_map **start, int len)
5689 struct trace_eval_map **map;
5696 trace_event_eval_update(map, len);
5698 trace_insert_eval_map_file(mod, start, len);
5702 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5703 size_t cnt, loff_t *ppos)
5705 struct trace_array *tr = filp->private_data;
5706 char buf[MAX_TRACER_SIZE+2];
5709 mutex_lock(&trace_types_lock);
5710 r = sprintf(buf, "%s\n", tr->current_trace->name);
5711 mutex_unlock(&trace_types_lock);
5713 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5716 int tracer_init(struct tracer *t, struct trace_array *tr)
5718 tracing_reset_online_cpus(&tr->array_buffer);
5722 static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
5726 for_each_tracing_cpu(cpu)
5727 per_cpu_ptr(buf->data, cpu)->entries = val;
5730 #ifdef CONFIG_TRACER_MAX_TRACE
5731 /* resize @trace_buf's entries to the size of @size_buf's entries */
5732 static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
5733 struct array_buffer *size_buf, int cpu_id)
5737 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5738 for_each_tracing_cpu(cpu) {
5739 ret = ring_buffer_resize(trace_buf->buffer,
5740 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5743 per_cpu_ptr(trace_buf->data, cpu)->entries =
5744 per_cpu_ptr(size_buf->data, cpu)->entries;
5747 ret = ring_buffer_resize(trace_buf->buffer,
5748 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5750 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5751 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5756 #endif /* CONFIG_TRACER_MAX_TRACE */
5758 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5759 unsigned long size, int cpu)
5764 * If kernel or user changes the size of the ring buffer
5765 * we use the size that was given, and we can forget about
5766 * expanding it later.
5768 ring_buffer_expanded = true;
5770 /* May be called before buffers are initialized */
5771 if (!tr->array_buffer.buffer)
5774 ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
5778 #ifdef CONFIG_TRACER_MAX_TRACE
5779 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5780 !tr->current_trace->use_max_tr)
5783 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5785 int r = resize_buffer_duplicate_size(&tr->array_buffer,
5786 &tr->array_buffer, cpu);
5789 * AARGH! We are left with different
5790 * size max buffer!!!!
5791 * The max buffer is our "snapshot" buffer.
5792 * When a tracer needs a snapshot (one of the
5793 * latency tracers), it swaps the max buffer
5794 * with the saved snapshot. We succeeded in updating
5795 * the size of the main buffer, but failed to
5796 * update the size of the max buffer. But when we tried
5797 * to reset the main buffer to the original size, we
5798 * failed there too. This is very unlikely to
5799 * happen, but if it does, warn and kill all
5803 tracing_disabled = 1;
5808 if (cpu == RING_BUFFER_ALL_CPUS)
5809 set_buffer_entries(&tr->max_buffer, size);
5811 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5814 #endif /* CONFIG_TRACER_MAX_TRACE */
5816 if (cpu == RING_BUFFER_ALL_CPUS)
5817 set_buffer_entries(&tr->array_buffer, size);
5819 per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
5824 ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5825 unsigned long size, int cpu_id)
5829 mutex_lock(&trace_types_lock);
5831 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5832 /* make sure this cpu is enabled in the mask */
5833 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5839 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5844 mutex_unlock(&trace_types_lock);
5851 * tracing_update_buffers - used by tracing facility to expand ring buffers
5853 * To save memory when tracing is never used on a system that has it
5854 * configured in, the ring buffers are set to a minimum size. Once
5855 * a user starts to use the tracing facility, they need to grow
5856 * to their default size.
5858 * This function is to be called when a tracer is about to be used.
5860 int tracing_update_buffers(void)
5864 mutex_lock(&trace_types_lock);
5865 if (!ring_buffer_expanded)
5866 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5867 RING_BUFFER_ALL_CPUS);
5868 mutex_unlock(&trace_types_lock);
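/*
 * A typical caller pattern (sketch only; the surrounding code is
 * illustrative, not part of this file): anything about to start
 * producing trace data expands the buffers first and bails out on
 * failure:
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *	... go on to enable the tracer or event ...
 */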
5873 struct trace_option_dentry;
5876 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5879 * Used to clear out the tracer before deletion of an instance.
5880 * Must have trace_types_lock held.
5882 static void tracing_set_nop(struct trace_array *tr)
5884 if (tr->current_trace == &nop_trace)
5887 tr->current_trace->enabled--;
5889 if (tr->current_trace->reset)
5890 tr->current_trace->reset(tr);
5892 tr->current_trace = &nop_trace;
5895 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5897 /* Only enable if the directory has been created already. */
5901 create_trace_option_files(tr, t);
5904 int tracing_set_tracer(struct trace_array *tr, const char *buf)
5907 #ifdef CONFIG_TRACER_MAX_TRACE
5912 mutex_lock(&trace_types_lock);
5914 if (!ring_buffer_expanded) {
5915 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5916 RING_BUFFER_ALL_CPUS);
5922 for (t = trace_types; t; t = t->next) {
5923 if (strcmp(t->name, buf) == 0)
5930 if (t == tr->current_trace)
5933 #ifdef CONFIG_TRACER_SNAPSHOT
5934 if (t->use_max_tr) {
5935 arch_spin_lock(&tr->max_lock);
5936 if (tr->cond_snapshot)
5938 arch_spin_unlock(&tr->max_lock);
5943 /* Some tracers won't work on kernel command line */
5944 if (system_state < SYSTEM_RUNNING && t->noboot) {
5945 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5950 /* Some tracers are only allowed for the top level buffer */
5951 if (!trace_ok_for_array(t, tr)) {
5956 /* If trace pipe files are being read, we can't change the tracer */
5957 if (tr->trace_ref) {
5962 trace_branch_disable();
5964 tr->current_trace->enabled--;
5966 if (tr->current_trace->reset)
5967 tr->current_trace->reset(tr);
5969 /* Current trace needs to be nop_trace before synchronize_rcu */
5970 tr->current_trace = &nop_trace;
5972 #ifdef CONFIG_TRACER_MAX_TRACE
5973 had_max_tr = tr->allocated_snapshot;
5975 if (had_max_tr && !t->use_max_tr) {
5977 * We need to make sure that update_max_tr() sees that
5978 * current_trace changed to nop_trace to keep it from
5979 * swapping the buffers after we resize it.
5980 * update_max_tr() is called with interrupts disabled,
5981 * so a synchronize_rcu() is sufficient.
5988 #ifdef CONFIG_TRACER_MAX_TRACE
5989 if (t->use_max_tr && !had_max_tr) {
5990 ret = tracing_alloc_snapshot_instance(tr);
5997 ret = tracer_init(t, tr);
6002 tr->current_trace = t;
6003 tr->current_trace->enabled++;
6004 trace_branch_enable(tr);
6006 mutex_unlock(&trace_types_lock);
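/*
 * User-space view of the above (the tracer names listed depend on the
 * kernel configuration and are examples only):
 *
 *	# cat available_tracers
 *	function_graph function nop
 *	# echo function > current_tracer
 *	# echo nop > current_tracer
 */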
6012 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
6013 size_t cnt, loff_t *ppos)
6015 struct trace_array *tr = filp->private_data;
6016 char buf[MAX_TRACER_SIZE+1];
6023 if (cnt > MAX_TRACER_SIZE)
6024 cnt = MAX_TRACER_SIZE;
6026 if (copy_from_user(buf, ubuf, cnt))
6031 /* strip ending whitespace. */
6032 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
6035 err = tracing_set_tracer(tr, buf);
6045 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
6046 size_t cnt, loff_t *ppos)
6051 r = snprintf(buf, sizeof(buf), "%ld\n",
6052 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
6053 if (r > sizeof(buf))
6055 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6059 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
6060 size_t cnt, loff_t *ppos)
6065 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6075 tracing_thresh_read(struct file *filp, char __user *ubuf,
6076 size_t cnt, loff_t *ppos)
6078 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
6082 tracing_thresh_write(struct file *filp, const char __user *ubuf,
6083 size_t cnt, loff_t *ppos)
6085 struct trace_array *tr = filp->private_data;
6088 mutex_lock(&trace_types_lock);
6089 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
6093 if (tr->current_trace->update_thresh) {
6094 ret = tr->current_trace->update_thresh(tr);
6101 mutex_unlock(&trace_types_lock);
6106 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6109 tracing_max_lat_read(struct file *filp, char __user *ubuf,
6110 size_t cnt, loff_t *ppos)
6112 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
6116 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
6117 size_t cnt, loff_t *ppos)
6119 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
6124 static int tracing_open_pipe(struct inode *inode, struct file *filp)
6126 struct trace_array *tr = inode->i_private;
6127 struct trace_iterator *iter;
6130 ret = tracing_check_open_get_tr(tr);
6134 mutex_lock(&trace_types_lock);
6136 /* create a buffer to store the information to pass to userspace */
6137 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6140 __trace_array_put(tr);
6144 trace_seq_init(&iter->seq);
6145 iter->trace = tr->current_trace;
6147 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
6152 /* trace pipe does not show start of buffer */
6153 cpumask_setall(iter->started);
6155 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
6156 iter->iter_flags |= TRACE_FILE_LAT_FMT;
6158 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6159 if (trace_clocks[tr->clock_id].in_ns)
6160 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
6163 iter->array_buffer = &tr->array_buffer;
6164 iter->cpu_file = tracing_get_cpu(inode);
6165 mutex_init(&iter->mutex);
6166 filp->private_data = iter;
6168 if (iter->trace->pipe_open)
6169 iter->trace->pipe_open(iter);
6171 nonseekable_open(inode, filp);
6175 mutex_unlock(&trace_types_lock);
6180 __trace_array_put(tr);
6181 mutex_unlock(&trace_types_lock);
6185 static int tracing_release_pipe(struct inode *inode, struct file *file)
6187 struct trace_iterator *iter = file->private_data;
6188 struct trace_array *tr = inode->i_private;
6190 mutex_lock(&trace_types_lock);
6194 if (iter->trace->pipe_close)
6195 iter->trace->pipe_close(iter);
6197 mutex_unlock(&trace_types_lock);
6199 free_cpumask_var(iter->started);
6200 mutex_destroy(&iter->mutex);
6203 trace_array_put(tr);
6209 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
6211 struct trace_array *tr = iter->tr;
6213 /* Iterators are static, they should be filled or empty */
6214 if (trace_buffer_iter(iter, iter->cpu_file))
6215 return EPOLLIN | EPOLLRDNORM;
6217 if (tr->trace_flags & TRACE_ITER_BLOCK)
6219 * Always select as readable when in blocking mode
6221 return EPOLLIN | EPOLLRDNORM;
6223 return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file,
6228 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
6230 struct trace_iterator *iter = filp->private_data;
6232 return trace_poll(iter, filp, poll_table);
6235 /* Must be called with iter->mutex held. */
6236 static int tracing_wait_pipe(struct file *filp)
6238 struct trace_iterator *iter = filp->private_data;
6241 while (trace_empty(iter)) {
6243 if ((filp->f_flags & O_NONBLOCK)) {
6248 * We block until we read something and tracing is disabled.
6249 * We still block if tracing is disabled, but we have never
6250 * read anything. This allows a user to cat this file, and
6251 * then enable tracing. But after we have read something,
6252 * we give an EOF when tracing is again disabled.
6254 * iter->pos will be 0 if we haven't read anything.
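 *
 * For illustration, from a shell (paths relative to the usual tracefs
 * mount, used here as an example):
 *
 *	# echo 0 > tracing_on
 *	# cat trace_pipe		(blocks: nothing has been read yet)
 *	# echo 1 > tracing_on		(from another shell: data flows)
 *	# echo 0 > tracing_on		(cat sees EOF once the buffer drains)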
6256 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
6259 mutex_unlock(&iter->mutex);
6261 ret = wait_on_pipe(iter, 0);
6263 mutex_lock(&iter->mutex);
6276 tracing_read_pipe(struct file *filp, char __user *ubuf,
6277 size_t cnt, loff_t *ppos)
6279 struct trace_iterator *iter = filp->private_data;
6283 * Avoid more than one consumer on a single file descriptor
6284 * This is just a matter of trace coherency, the ring buffer itself
6287 mutex_lock(&iter->mutex);
6289 /* return any leftover data */
6290 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6294 trace_seq_init(&iter->seq);
6296 if (iter->trace->read) {
6297 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
6303 sret = tracing_wait_pipe(filp);
6307 /* stop when tracing is finished */
6308 if (trace_empty(iter)) {
6313 if (cnt >= PAGE_SIZE)
6314 cnt = PAGE_SIZE - 1;
6316 /* reset all but tr, trace, and overruns */
6317 memset(&iter->seq, 0,
6318 sizeof(struct trace_iterator) -
6319 offsetof(struct trace_iterator, seq));
6320 cpumask_clear(iter->started);
6321 trace_seq_init(&iter->seq);
6324 trace_event_read_lock();
6325 trace_access_lock(iter->cpu_file);
6326 while (trace_find_next_entry_inc(iter) != NULL) {
6327 enum print_line_t ret;
6328 int save_len = iter->seq.seq.len;
6330 ret = print_trace_line(iter);
6331 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6332 /* don't print partial lines */
6333 iter->seq.seq.len = save_len;
6336 if (ret != TRACE_TYPE_NO_CONSUME)
6337 trace_consume(iter);
6339 if (trace_seq_used(&iter->seq) >= cnt)
6343 * Setting the full flag means we reached the trace_seq buffer
6344 * size and we should leave by partial output condition above.
6345 * One of the trace_seq_* functions is not used properly.
6347 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
6350 trace_access_unlock(iter->cpu_file);
6351 trace_event_read_unlock();
6353 /* Now copy what we have to the user */
6354 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
6355 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
6356 trace_seq_init(&iter->seq);
6359 * If there was nothing to send to user, in spite of consuming trace
6360 * entries, go back to wait for more entries.
6366 mutex_unlock(&iter->mutex);
6371 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
6374 __free_page(spd->pages[idx]);
6378 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
6384 /* Seq buffer is page-sized, exactly what we need. */
6386 save_len = iter->seq.seq.len;
6387 ret = print_trace_line(iter);
6389 if (trace_seq_has_overflowed(&iter->seq)) {
6390 iter->seq.seq.len = save_len;
6395 * This should not be hit, because it should only
6396 * be set if the iter->seq overflowed. But check it
6397 * anyway to be safe.
6399 if (ret == TRACE_TYPE_PARTIAL_LINE) {
6400 iter->seq.seq.len = save_len;
6404 count = trace_seq_used(&iter->seq) - save_len;
6407 iter->seq.seq.len = save_len;
6411 if (ret != TRACE_TYPE_NO_CONSUME)
6412 trace_consume(iter);
6414 if (!trace_find_next_entry_inc(iter)) {
6424 static ssize_t tracing_splice_read_pipe(struct file *filp,
6426 struct pipe_inode_info *pipe,
6430 struct page *pages_def[PIPE_DEF_BUFFERS];
6431 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6432 struct trace_iterator *iter = filp->private_data;
6433 struct splice_pipe_desc spd = {
6435 .partial = partial_def,
6436 .nr_pages = 0, /* This gets updated below. */
6437 .nr_pages_max = PIPE_DEF_BUFFERS,
6438 .ops = &default_pipe_buf_ops,
6439 .spd_release = tracing_spd_release_pipe,
6445 if (splice_grow_spd(pipe, &spd))
6448 mutex_lock(&iter->mutex);
6450 if (iter->trace->splice_read) {
6451 ret = iter->trace->splice_read(iter, filp,
6452 ppos, pipe, len, flags);
6457 ret = tracing_wait_pipe(filp);
6461 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
6466 trace_event_read_lock();
6467 trace_access_lock(iter->cpu_file);
6469 /* Fill as many pages as possible. */
6470 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
6471 spd.pages[i] = alloc_page(GFP_KERNEL);
6475 rem = tracing_fill_pipe_page(rem, iter);
6477 /* Copy the data into the page, so we can start over. */
6478 ret = trace_seq_to_buffer(&iter->seq,
6479 page_address(spd.pages[i]),
6480 trace_seq_used(&iter->seq));
6482 __free_page(spd.pages[i]);
6485 spd.partial[i].offset = 0;
6486 spd.partial[i].len = trace_seq_used(&iter->seq);
6488 trace_seq_init(&iter->seq);
6491 trace_access_unlock(iter->cpu_file);
6492 trace_event_read_unlock();
6493 mutex_unlock(&iter->mutex);
6498 ret = splice_to_pipe(pipe, &spd);
6502 splice_shrink_spd(&spd);
6506 mutex_unlock(&iter->mutex);
6511 tracing_entries_read(struct file *filp, char __user *ubuf,
6512 size_t cnt, loff_t *ppos)
6514 struct inode *inode = file_inode(filp);
6515 struct trace_array *tr = inode->i_private;
6516 int cpu = tracing_get_cpu(inode);
6521 mutex_lock(&trace_types_lock);
6523 if (cpu == RING_BUFFER_ALL_CPUS) {
6524 int cpu, buf_size_same;
6529 /* check if all cpu sizes are same */
6530 for_each_tracing_cpu(cpu) {
6531 /* fill in the size from first enabled cpu */
6533 size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries;
6534 if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) {
6540 if (buf_size_same) {
6541 if (!ring_buffer_expanded)
6542 r = sprintf(buf, "%lu (expanded: %lu)\n",
6544 trace_buf_size >> 10);
6546 r = sprintf(buf, "%lu\n", size >> 10);
6548 r = sprintf(buf, "X\n");
6550 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10);
6552 mutex_unlock(&trace_types_lock);
6554 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6559 tracing_entries_write(struct file *filp, const char __user *ubuf,
6560 size_t cnt, loff_t *ppos)
6562 struct inode *inode = file_inode(filp);
6563 struct trace_array *tr = inode->i_private;
6567 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6571 /* must have at least 1 entry */
6575 /* value is in KB */
6577 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
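/*
 * Example interaction with the write handler above (sizes illustrative);
 * the value is in KB and may target all CPUs or a single one through the
 * per_cpu directory:
 *
 *	# echo 4096 > buffer_size_kb
 *	# echo 1024 > per_cpu/cpu0/buffer_size_kb
 */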
6587 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6588 size_t cnt, loff_t *ppos)
6590 struct trace_array *tr = filp->private_data;
6593 unsigned long size = 0, expanded_size = 0;
6595 mutex_lock(&trace_types_lock);
6596 for_each_tracing_cpu(cpu) {
6597 size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10;
6598 if (!ring_buffer_expanded)
6599 expanded_size += trace_buf_size >> 10;
6601 if (ring_buffer_expanded)
6602 r = sprintf(buf, "%lu\n", size);
6604 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6605 mutex_unlock(&trace_types_lock);
6607 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6611 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6612 size_t cnt, loff_t *ppos)
6615 * There is no need to read what the user has written, this function
6616 * is just to make sure that there is no error when "echo" is used
6625 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6627 struct trace_array *tr = inode->i_private;
6629 /* disable tracing ? */
6630 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6631 tracer_tracing_off(tr);
6632 /* resize the ring buffer to 0 */
6633 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6635 trace_array_put(tr);
6641 tracing_mark_write(struct file *filp, const char __user *ubuf,
6642 size_t cnt, loff_t *fpos)
6644 struct trace_array *tr = filp->private_data;
6645 struct ring_buffer_event *event;
6646 enum event_trigger_type tt = ETT_NONE;
6647 struct trace_buffer *buffer;
6648 struct print_entry *entry;
6649 unsigned long irq_flags;
6654 /* Used in tracing_mark_raw_write() as well */
6655 #define FAULTED_STR "<faulted>"
6656 #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */
6658 if (tracing_disabled)
6661 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6664 if (cnt > TRACE_BUF_SIZE)
6665 cnt = TRACE_BUF_SIZE;
6667 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6669 local_save_flags(irq_flags);
6670 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6672 /* If less than "<faulted>", then make sure we can still add that */
6673 if (cnt < FAULTED_SIZE)
6674 size += FAULTED_SIZE - cnt;
6676 buffer = tr->array_buffer.buffer;
6677 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6678 irq_flags, preempt_count());
6679 if (unlikely(!event))
6680 /* Ring buffer disabled, return as if not open for write */
6683 entry = ring_buffer_event_data(event);
6684 entry->ip = _THIS_IP_;
6686 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6688 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6694 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6695 /* do not add \n before testing triggers, but add \0 */
6696 entry->buf[cnt] = '\0';
6697 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6700 if (entry->buf[cnt - 1] != '\n') {
6701 entry->buf[cnt] = '\n';
6702 entry->buf[cnt + 1] = '\0';
6704 entry->buf[cnt] = '\0';
6706 if (static_branch_unlikely(&trace_marker_exports_enabled))
6707 ftrace_exports(event, TRACE_EXPORT_MARKER);
6708 __buffer_unlock_commit(buffer, event);
6711 event_triggers_post_call(tr->trace_marker_file, tt);
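/*
 * Minimal user-space sketch of writing a marker (error handling omitted;
 * the mount point is an example):
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *	write(fd, "hello from user space\n", 22);
 *
 * The text shows up in the trace as a TRACE_PRINT event; the code above
 * adds a trailing newline and NUL terminator if they are missing.
 */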
6719 /* Limit it for now to 3K (including tag) */
6720 #define RAW_DATA_MAX_SIZE (1024*3)
6723 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6724 size_t cnt, loff_t *fpos)
6726 struct trace_array *tr = filp->private_data;
6727 struct ring_buffer_event *event;
6728 struct trace_buffer *buffer;
6729 struct raw_data_entry *entry;
6730 unsigned long irq_flags;
6735 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6737 if (tracing_disabled)
6740 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6743 /* The marker must at least have a tag id */
6744 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6747 if (cnt > TRACE_BUF_SIZE)
6748 cnt = TRACE_BUF_SIZE;
6750 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6752 local_save_flags(irq_flags);
6753 size = sizeof(*entry) + cnt;
6754 if (cnt < FAULT_SIZE_ID)
6755 size += FAULT_SIZE_ID - cnt;
6757 buffer = tr->array_buffer.buffer;
6758 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6759 irq_flags, preempt_count());
6761 /* Ring buffer disabled, return as if not open for write */
6764 entry = ring_buffer_event_data(event);
6766 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6769 memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE);
6774 __buffer_unlock_commit(buffer, event);
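/*
 * Matching user-space sketch for the raw marker (struct layout and id
 * value are illustrative; only the leading unsigned int tag id is
 * required by the checks above):
 *
 *	struct {
 *		unsigned int id;
 *		char payload[8];
 *	} raw = { .id = 0x1234, .payload = "rawdata" };
 *
 *	write(fd, &raw, sizeof(raw));	(fd open on trace_marker_raw)
 */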
6782 static int tracing_clock_show(struct seq_file *m, void *v)
6784 struct trace_array *tr = m->private;
6787 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6789 "%s%s%s%s", i ? " " : "",
6790 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6791 i == tr->clock_id ? "]" : "");
6797 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6801 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6802 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6805 if (i == ARRAY_SIZE(trace_clocks))
6808 mutex_lock(&trace_types_lock);
6812 ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func);
6815 * New clock may not be consistent with the previous clock.
6816 * Reset the buffer so that it doesn't have incomparable timestamps.
6818 tracing_reset_online_cpus(&tr->array_buffer);
6820 #ifdef CONFIG_TRACER_MAX_TRACE
6821 if (tr->max_buffer.buffer)
6822 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6823 tracing_reset_online_cpus(&tr->max_buffer);
6826 mutex_unlock(&trace_types_lock);
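/*
 * From user space this is the trace_clock file: reading it brackets the
 * clock currently in use and writing selects a new one. The names come
 * from the trace_clocks[] table; the listing below is an example and
 * varies by architecture:
 *
 *	# cat trace_clock
 *	[local] global counter uptime perf ...
 *	# echo global > trace_clock
 */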
6831 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6832 size_t cnt, loff_t *fpos)
6834 struct seq_file *m = filp->private_data;
6835 struct trace_array *tr = m->private;
6837 const char *clockstr;
6840 if (cnt >= sizeof(buf))
6843 if (copy_from_user(buf, ubuf, cnt))
6848 clockstr = strstrip(buf);
6850 ret = tracing_set_clock(tr, clockstr);
6859 static int tracing_clock_open(struct inode *inode, struct file *file)
6861 struct trace_array *tr = inode->i_private;
6864 ret = tracing_check_open_get_tr(tr);
6868 ret = single_open(file, tracing_clock_show, inode->i_private);
6870 trace_array_put(tr);
6875 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6877 struct trace_array *tr = m->private;
6879 mutex_lock(&trace_types_lock);
6881 if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer))
6882 seq_puts(m, "delta [absolute]\n");
6884 seq_puts(m, "[delta] absolute\n");
6886 mutex_unlock(&trace_types_lock);
6891 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6893 struct trace_array *tr = inode->i_private;
6896 ret = tracing_check_open_get_tr(tr);
6900 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6902 trace_array_put(tr);
6907 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6911 mutex_lock(&trace_types_lock);
6913 if (abs && tr->time_stamp_abs_ref++)
6917 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6922 if (--tr->time_stamp_abs_ref)
6926 ring_buffer_set_time_stamp_abs(tr->array_buffer.buffer, abs);
6928 #ifdef CONFIG_TRACER_MAX_TRACE
6929 if (tr->max_buffer.buffer)
6930 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6933 mutex_unlock(&trace_types_lock);
6938 struct ftrace_buffer_info {
6939 struct trace_iterator iter;
6941 unsigned int spare_cpu;
6945 #ifdef CONFIG_TRACER_SNAPSHOT
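/*
 * The "snapshot" file implemented below accepts a small set of values;
 * typical usage (illustrative):
 *
 *	# echo 1 > snapshot	allocate (if needed) and take a snapshot
 *	# cat snapshot		read the snapshotted (max) buffer
 *	# echo 2 > snapshot	clear the snapshot buffer without freeing it
 *	# echo 0 > snapshot	free the snapshot buffer
 */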
6946 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6948 struct trace_array *tr = inode->i_private;
6949 struct trace_iterator *iter;
6953 ret = tracing_check_open_get_tr(tr);
6957 if (file->f_mode & FMODE_READ) {
6958 iter = __tracing_open(inode, file, true);
6960 ret = PTR_ERR(iter);
6962 /* Writes still need the seq_file to hold the private data */
6964 m = kzalloc(sizeof(*m), GFP_KERNEL);
6967 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6975 iter->array_buffer = &tr->max_buffer;
6976 iter->cpu_file = tracing_get_cpu(inode);
6978 file->private_data = m;
6982 trace_array_put(tr);
6988 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6991 struct seq_file *m = filp->private_data;
6992 struct trace_iterator *iter = m->private;
6993 struct trace_array *tr = iter->tr;
6997 ret = tracing_update_buffers();
7001 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7005 mutex_lock(&trace_types_lock);
7007 if (tr->current_trace->use_max_tr) {
7012 arch_spin_lock(&tr->max_lock);
7013 if (tr->cond_snapshot)
7015 arch_spin_unlock(&tr->max_lock);
7021 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7025 if (tr->allocated_snapshot)
7029 /* Only allow per-cpu swap if the ring buffer supports it */
7030 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
7031 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
7036 if (tr->allocated_snapshot)
7037 ret = resize_buffer_duplicate_size(&tr->max_buffer,
7038 &tr->array_buffer, iter->cpu_file);
7040 ret = tracing_alloc_snapshot_instance(tr);
7043 local_irq_disable();
7044 /* Now, we're going to swap */
7045 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7046 update_max_tr(tr, current, smp_processor_id(), NULL);
7048 update_max_tr_single(tr, current, iter->cpu_file);
7052 if (tr->allocated_snapshot) {
7053 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
7054 tracing_reset_online_cpus(&tr->max_buffer);
7056 tracing_reset_cpu(&tr->max_buffer, iter->cpu_file);
7066 mutex_unlock(&trace_types_lock);
7070 static int tracing_snapshot_release(struct inode *inode, struct file *file)
7072 struct seq_file *m = file->private_data;
7075 ret = tracing_release(inode, file);
7077 if (file->f_mode & FMODE_READ)
7080 /* If write only, the seq_file is just a stub */
7088 static int tracing_buffers_open(struct inode *inode, struct file *filp);
7089 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
7090 size_t count, loff_t *ppos);
7091 static int tracing_buffers_release(struct inode *inode, struct file *file);
7092 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7093 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
7095 static int snapshot_raw_open(struct inode *inode, struct file *filp)
7097 struct ftrace_buffer_info *info;
7100 /* The following checks for tracefs lockdown */
7101 ret = tracing_buffers_open(inode, filp);
7105 info = filp->private_data;
7107 if (info->iter.trace->use_max_tr) {
7108 tracing_buffers_release(inode, filp);
7112 info->iter.snapshot = true;
7113 info->iter.array_buffer = &info->iter.tr->max_buffer;
7118 #endif /* CONFIG_TRACER_SNAPSHOT */
7121 static const struct file_operations tracing_thresh_fops = {
7122 .open = tracing_open_generic,
7123 .read = tracing_thresh_read,
7124 .write = tracing_thresh_write,
7125 .llseek = generic_file_llseek,
7128 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
7129 static const struct file_operations tracing_max_lat_fops = {
7130 .open = tracing_open_generic,
7131 .read = tracing_max_lat_read,
7132 .write = tracing_max_lat_write,
7133 .llseek = generic_file_llseek,
7137 static const struct file_operations set_tracer_fops = {
7138 .open = tracing_open_generic,
7139 .read = tracing_set_trace_read,
7140 .write = tracing_set_trace_write,
7141 .llseek = generic_file_llseek,
7144 static const struct file_operations tracing_pipe_fops = {
7145 .open = tracing_open_pipe,
7146 .poll = tracing_poll_pipe,
7147 .read = tracing_read_pipe,
7148 .splice_read = tracing_splice_read_pipe,
7149 .release = tracing_release_pipe,
7150 .llseek = no_llseek,
7153 static const struct file_operations tracing_entries_fops = {
7154 .open = tracing_open_generic_tr,
7155 .read = tracing_entries_read,
7156 .write = tracing_entries_write,
7157 .llseek = generic_file_llseek,
7158 .release = tracing_release_generic_tr,
7161 static const struct file_operations tracing_total_entries_fops = {
7162 .open = tracing_open_generic_tr,
7163 .read = tracing_total_entries_read,
7164 .llseek = generic_file_llseek,
7165 .release = tracing_release_generic_tr,
7168 static const struct file_operations tracing_free_buffer_fops = {
7169 .open = tracing_open_generic_tr,
7170 .write = tracing_free_buffer_write,
7171 .release = tracing_free_buffer_release,
7174 static const struct file_operations tracing_mark_fops = {
7175 .open = tracing_open_generic_tr,
7176 .write = tracing_mark_write,
7177 .llseek = generic_file_llseek,
7178 .release = tracing_release_generic_tr,
7181 static const struct file_operations tracing_mark_raw_fops = {
7182 .open = tracing_open_generic_tr,
7183 .write = tracing_mark_raw_write,
7184 .llseek = generic_file_llseek,
7185 .release = tracing_release_generic_tr,
7188 static const struct file_operations trace_clock_fops = {
7189 .open = tracing_clock_open,
7191 .llseek = seq_lseek,
7192 .release = tracing_single_release_tr,
7193 .write = tracing_clock_write,
7196 static const struct file_operations trace_time_stamp_mode_fops = {
7197 .open = tracing_time_stamp_mode_open,
7199 .llseek = seq_lseek,
7200 .release = tracing_single_release_tr,
7203 #ifdef CONFIG_TRACER_SNAPSHOT
7204 static const struct file_operations snapshot_fops = {
7205 .open = tracing_snapshot_open,
7207 .write = tracing_snapshot_write,
7208 .llseek = tracing_lseek,
7209 .release = tracing_snapshot_release,
7212 static const struct file_operations snapshot_raw_fops = {
7213 .open = snapshot_raw_open,
7214 .read = tracing_buffers_read,
7215 .release = tracing_buffers_release,
7216 .splice_read = tracing_buffers_splice_read,
7217 .llseek = no_llseek,
7220 #endif /* CONFIG_TRACER_SNAPSHOT */
7222 #define TRACING_LOG_ERRS_MAX 8
7223 #define TRACING_LOG_LOC_MAX 128
7225 #define CMD_PREFIX " Command: "
7228 const char **errs; /* ptr to loc-specific array of err strings */
7229 u8 type; /* index into errs -> specific err string */
7230 u8 pos; /* MAX_FILTER_STR_VAL = 256 */
7234 struct tracing_log_err {
7235 struct list_head list;
7236 struct err_info info;
7237 char loc[TRACING_LOG_LOC_MAX]; /* err location */
7238 char cmd[MAX_FILTER_STR_VAL]; /* what caused err */
7241 static DEFINE_MUTEX(tracing_err_log_lock);
7243 static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
7245 struct tracing_log_err *err;
7247 if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) {
7248 err = kzalloc(sizeof(*err), GFP_KERNEL);
7250 err = ERR_PTR(-ENOMEM);
7251 tr->n_err_log_entries++;
7256 err = list_first_entry(&tr->err_log, struct tracing_log_err, list);
7257 list_del(&err->list);
7263 * err_pos - find the position of a string within a command for error careting
7264 * @cmd: The tracing command that caused the error
7265 * @str: The string to position the caret at within @cmd
7267 * Finds the position of the first occurrence of @str within @cmd. The
7268 * return value can be passed to tracing_log_err() for caret placement
7271 * Returns the index within @cmd of the first occurrence of @str or 0
7272 * if @str was not found.
7274 unsigned int err_pos(char *cmd, const char *str)
7278 if (WARN_ON(!strlen(cmd)))
7281 found = strstr(cmd, str);
7289 * tracing_log_err - write an error to the tracing error log
7290 * @tr: The associated trace array for the error (NULL for top level array)
7291 * @loc: A string describing where the error occurred
7292 * @cmd: The tracing command that caused the error
7293 * @errs: The array of loc-specific static error strings
7294 * @type: The index into errs[], which produces the specific static err string
7295 * @pos: The position the caret should be placed in the cmd
7297 * Writes an error into tracing/error_log of the form:
7299 * <loc>: error: <text>
7303 * tracing/error_log is a small log file containing the last
7304 * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated
7305 * unless there has been a tracing error, and the error log can be
7306 * cleared and have its memory freed by writing the empty string in
7307 * truncation mode to it, i.e. echo > tracing/error_log.
7309 * NOTE: the @errs array along with the @type param are used to
7310 * produce a static error string - this string is not copied and saved
7311 * when the error is logged - only a pointer to it is saved. See
7312 * existing callers for examples of how static strings are typically
7313 * defined for use with tracing_log_err().
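/*
 * Purely illustrative sketch (not lifted from a real caller) of how a
 * command parser might report an error here; the names foo_cmd_errs,
 * FOO_ERR_* and action_str are invented for the example:
 *
 *	static const char *foo_cmd_errs[] = {
 *		"Unknown action",
 *		"Missing argument",
 *	};
 *	enum { FOO_ERR_ACTION, FOO_ERR_MISSING };
 *
 *	tracing_log_err(tr, "foo: parse error", cmd, foo_cmd_errs,
 *			FOO_ERR_ACTION, err_pos(cmd, action_str));
 *
 * See the existing callers mentioned above for the real conventions.
 */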
7315 void tracing_log_err(struct trace_array *tr,
7316 const char *loc, const char *cmd,
7317 const char **errs, u8 type, u8 pos)
7319 struct tracing_log_err *err;
7324 mutex_lock(&tracing_err_log_lock);
7325 err = get_tracing_log_err(tr);
7326 if (PTR_ERR(err) == -ENOMEM) {
7327 mutex_unlock(&tracing_err_log_lock);
7331 snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc);
7332 snprintf(err->cmd, MAX_FILTER_STR_VAL, "\n" CMD_PREFIX "%s\n", cmd);
7334 err->info.errs = errs;
7335 err->info.type = type;
7336 err->info.pos = pos;
7337 err->info.ts = local_clock();
7339 list_add_tail(&err->list, &tr->err_log);
7340 mutex_unlock(&tracing_err_log_lock);
7343 static void clear_tracing_err_log(struct trace_array *tr)
7345 struct tracing_log_err *err, *next;
7347 mutex_lock(&tracing_err_log_lock);
7348 list_for_each_entry_safe(err, next, &tr->err_log, list) {
7349 list_del(&err->list);
7353 tr->n_err_log_entries = 0;
7354 mutex_unlock(&tracing_err_log_lock);
7357 static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos)
7359 struct trace_array *tr = m->private;
7361 mutex_lock(&tracing_err_log_lock);
7363 return seq_list_start(&tr->err_log, *pos);
7366 static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos)
7368 struct trace_array *tr = m->private;
7370 return seq_list_next(v, &tr->err_log, pos);
7373 static void tracing_err_log_seq_stop(struct seq_file *m, void *v)
7375 mutex_unlock(&tracing_err_log_lock);
7378 static void tracing_err_log_show_pos(struct seq_file *m, u8 pos)
7382 for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++)
7384 for (i = 0; i < pos; i++)
7389 static int tracing_err_log_seq_show(struct seq_file *m, void *v)
7391 struct tracing_log_err *err = v;
7394 const char *err_text = err->info.errs[err->info.type];
7395 u64 sec = err->info.ts;
7398 nsec = do_div(sec, NSEC_PER_SEC);
7399 seq_printf(m, "[%5llu.%06u] %s%s", sec, nsec / 1000,
7400 err->loc, err_text);
7401 seq_printf(m, "%s", err->cmd);
7402 tracing_err_log_show_pos(m, err->info.pos);
7408 static const struct seq_operations tracing_err_log_seq_ops = {
7409 .start = tracing_err_log_seq_start,
7410 .next = tracing_err_log_seq_next,
7411 .stop = tracing_err_log_seq_stop,
7412 .show = tracing_err_log_seq_show
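/*
 * For illustration only, a read of tracing/error_log rendered by the
 * seq operations above looks roughly like this (the location, error
 * text and command are invented; the caret is placed @pos characters
 * into the command):
 *
 *	[  123.456789] some-location: error: Some static error string
 *	  Command: the-command-that-failed
 *	               ^
 */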
7415 static int tracing_err_log_open(struct inode *inode, struct file *file)
7417 struct trace_array *tr = inode->i_private;
7420 ret = tracing_check_open_get_tr(tr);
7424 /* If this file was opened for write, then erase contents */
7425 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
7426 clear_tracing_err_log(tr);
7428 if (file->f_mode & FMODE_READ) {
7429 ret = seq_open(file, &tracing_err_log_seq_ops);
7431 struct seq_file *m = file->private_data;
7434 trace_array_put(tr);
7440 static ssize_t tracing_err_log_write(struct file *file,
7441 const char __user *buffer,
7442 size_t count, loff_t *ppos)
7447 static int tracing_err_log_release(struct inode *inode, struct file *file)
7449 struct trace_array *tr = inode->i_private;
7451 trace_array_put(tr);
7453 if (file->f_mode & FMODE_READ)
7454 seq_release(inode, file);
7459 static const struct file_operations tracing_err_log_fops = {
7460 .open = tracing_err_log_open,
7461 .write = tracing_err_log_write,
7463 .llseek = seq_lseek,
7464 .release = tracing_err_log_release,
7467 static int tracing_buffers_open(struct inode *inode, struct file *filp)
7469 struct trace_array *tr = inode->i_private;
7470 struct ftrace_buffer_info *info;
7473 ret = tracing_check_open_get_tr(tr);
7477 info = kvzalloc(sizeof(*info), GFP_KERNEL);
7479 trace_array_put(tr);
7483 mutex_lock(&trace_types_lock);
7486 info->iter.cpu_file = tracing_get_cpu(inode);
7487 info->iter.trace = tr->current_trace;
7488 info->iter.array_buffer = &tr->array_buffer;
7490 /* Force reading ring buffer for first read */
7491 info->read = (unsigned int)-1;
7493 filp->private_data = info;
7497 mutex_unlock(&trace_types_lock);
7499 ret = nonseekable_open(inode, filp);
7501 trace_array_put(tr);
7507 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
7509 struct ftrace_buffer_info *info = filp->private_data;
7510 struct trace_iterator *iter = &info->iter;
7512 return trace_poll(iter, filp, poll_table);
7516 tracing_buffers_read(struct file *filp, char __user *ubuf,
7517 size_t count, loff_t *ppos)
7519 struct ftrace_buffer_info *info = filp->private_data;
7520 struct trace_iterator *iter = &info->iter;
7527 #ifdef CONFIG_TRACER_MAX_TRACE
7528 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7533 info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer,
7535 if (IS_ERR(info->spare)) {
7536 ret = PTR_ERR(info->spare);
7539 info->spare_cpu = iter->cpu_file;
7545 /* Do we have previous read data to read? */
7546 if (info->read < PAGE_SIZE)
7550 trace_access_lock(iter->cpu_file);
7551 ret = ring_buffer_read_page(iter->array_buffer->buffer,
7555 trace_access_unlock(iter->cpu_file);
7558 if (trace_empty(iter)) {
7559 if ((filp->f_flags & O_NONBLOCK))
7562 ret = wait_on_pipe(iter, 0);
7573 size = PAGE_SIZE - info->read;
7577 ret = copy_to_user(ubuf, info->spare + info->read, size);
7589 static int tracing_buffers_release(struct inode *inode, struct file *file)
7591 struct ftrace_buffer_info *info = file->private_data;
7592 struct trace_iterator *iter = &info->iter;
7594 mutex_lock(&trace_types_lock);
7596 iter->tr->trace_ref--;
7598 __trace_array_put(iter->tr);
7601 ring_buffer_free_read_page(iter->array_buffer->buffer,
7602 info->spare_cpu, info->spare);
7605 mutex_unlock(&trace_types_lock);
7611 struct trace_buffer *buffer;
7614 refcount_t refcount;
7617 static void buffer_ref_release(struct buffer_ref *ref)
7619 if (!refcount_dec_and_test(&ref->refcount))
7621 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
7625 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
7626 struct pipe_buffer *buf)
7628 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7630 buffer_ref_release(ref);
7634 static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
7635 struct pipe_buffer *buf)
7637 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
7639 if (refcount_read(&ref->refcount) > INT_MAX/2)
7642 refcount_inc(&ref->refcount);
7646 /* Pipe buffer operations for a buffer. */
7647 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
7648 .release = buffer_pipe_buf_release,
7649 .get = buffer_pipe_buf_get,
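/*
 * Each page handed to splice below is wrapped in a buffer_ref.  The
 * pipe callbacks above only adjust ref->refcount; the underlying read
 * page is handed back to the ring buffer via ring_buffer_free_read_page()
 * in buffer_ref_release() once the last reference is dropped.
 */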
7653 * Callback from splice_to_pipe(), if we need to release some pages
7654 * at the end of the spd in case we errored out while filling the pipe.
7656 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
7658 struct buffer_ref *ref =
7659 (struct buffer_ref *)spd->partial[i].private;
7661 buffer_ref_release(ref);
7662 spd->partial[i].private = 0;
7666 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
7667 struct pipe_inode_info *pipe, size_t len,
7670 struct ftrace_buffer_info *info = file->private_data;
7671 struct trace_iterator *iter = &info->iter;
7672 struct partial_page partial_def[PIPE_DEF_BUFFERS];
7673 struct page *pages_def[PIPE_DEF_BUFFERS];
7674 struct splice_pipe_desc spd = {
7676 .partial = partial_def,
7677 .nr_pages_max = PIPE_DEF_BUFFERS,
7678 .ops = &buffer_pipe_buf_ops,
7679 .spd_release = buffer_spd_release,
7681 struct buffer_ref *ref;
7685 #ifdef CONFIG_TRACER_MAX_TRACE
7686 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
7690 if (*ppos & (PAGE_SIZE - 1))
7693 if (len & (PAGE_SIZE - 1)) {
7694 if (len < PAGE_SIZE)
7699 if (splice_grow_spd(pipe, &spd))
7703 trace_access_lock(iter->cpu_file);
7704 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7706 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
7710 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
7716 refcount_set(&ref->refcount, 1);
7717 ref->buffer = iter->array_buffer->buffer;
7718 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
7719 if (IS_ERR(ref->page)) {
7720 ret = PTR_ERR(ref->page);
7725 ref->cpu = iter->cpu_file;
7727 r = ring_buffer_read_page(ref->buffer, &ref->page,
7728 len, iter->cpu_file, 1);
7730 ring_buffer_free_read_page(ref->buffer, ref->cpu,
7736 page = virt_to_page(ref->page);
7738 spd.pages[i] = page;
7739 spd.partial[i].len = PAGE_SIZE;
7740 spd.partial[i].offset = 0;
7741 spd.partial[i].private = (unsigned long)ref;
7745 entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file);
7748 trace_access_unlock(iter->cpu_file);
7751 /* did we read anything? */
7752 if (!spd.nr_pages) {
7757 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
7760 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
7767 ret = splice_to_pipe(pipe, &spd);
7769 splice_shrink_spd(&spd);
7774 static const struct file_operations tracing_buffers_fops = {
7775 .open = tracing_buffers_open,
7776 .read = tracing_buffers_read,
7777 .poll = tracing_buffers_poll,
7778 .release = tracing_buffers_release,
7779 .splice_read = tracing_buffers_splice_read,
7780 .llseek = no_llseek,
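/*
 * Illustrative, hypothetical user-space sketch of consuming
 * per_cpu/cpuN/trace_pipe_raw through the file operations above: read()
 * copies out the contents of one ring-buffer page at a time, and
 * splice() additionally requires a page-aligned offset and at least one
 * full page of length (see tracing_buffers_splice_read()).  The mount
 * point and buffer size below are assumptions, not taken from this file:
 *
 *	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
 *		      O_RDONLY);
 *	char page[4096];
 *	ssize_t n = read(fd, page, sizeof(page));
 *
 * After the read, "page" holds one raw ring-buffer page for CPU 0.
 */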
7784 tracing_stats_read(struct file *filp, char __user *ubuf,
7785 size_t count, loff_t *ppos)
7787 struct inode *inode = file_inode(filp);
7788 struct trace_array *tr = inode->i_private;
7789 struct array_buffer *trace_buf = &tr->array_buffer;
7790 int cpu = tracing_get_cpu(inode);
7791 struct trace_seq *s;
7793 unsigned long long t;
7794 unsigned long usec_rem;
7796 s = kmalloc(sizeof(*s), GFP_KERNEL);
7802 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
7803 trace_seq_printf(s, "entries: %ld\n", cnt);
7805 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7806 trace_seq_printf(s, "overrun: %ld\n", cnt);
7808 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7809 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7811 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7812 trace_seq_printf(s, "bytes: %ld\n", cnt);
7814 if (trace_clocks[tr->clock_id].in_ns) {
7815 /* local or global for trace_clock */
7816 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7817 usec_rem = do_div(t, USEC_PER_SEC);
7818 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7821 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7822 usec_rem = do_div(t, USEC_PER_SEC);
7823 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7825 /* counter or tsc mode for trace_clock */
7826 trace_seq_printf(s, "oldest event ts: %llu\n",
7827 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7829 trace_seq_printf(s, "now ts: %llu\n",
7830 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7833 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7834 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7836 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7837 trace_seq_printf(s, "read events: %ld\n", cnt);
7839 count = simple_read_from_buffer(ubuf, count, ppos,
7840 s->buffer, trace_seq_used(s));
7847 static const struct file_operations tracing_stats_fops = {
7848 .open = tracing_open_generic_tr,
7849 .read = tracing_stats_read,
7850 .llseek = generic_file_llseek,
7851 .release = tracing_release_generic_tr,
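/*
 * Reading per_cpu/cpuN/stats through the fops above yields one line per
 * counter emitted by tracing_stats_read(), for example (all values
 * invented):
 *
 *	entries: 1024
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 46080
 *	oldest event ts:     5.123456
 *	now ts:    10.654321
 *	dropped events: 0
 *	read events: 512
 */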
7854 #ifdef CONFIG_DYNAMIC_FTRACE
7857 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7858 size_t cnt, loff_t *ppos)
7864 /* 256 should be plenty to hold the amount needed */
7865 buf = kmalloc(256, GFP_KERNEL);
7869 r = scnprintf(buf, 256, "%ld pages:%ld groups: %ld\n",
7870 ftrace_update_tot_cnt,
7871 ftrace_number_of_pages,
7872 ftrace_number_of_groups);
7874 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7879 static const struct file_operations tracing_dyn_info_fops = {
7880 .open = tracing_open_generic,
7881 .read = tracing_read_dyn_info,
7882 .llseek = generic_file_llseek,
7884 #endif /* CONFIG_DYNAMIC_FTRACE */
7886 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7888 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7889 struct trace_array *tr, struct ftrace_probe_ops *ops,
7892 tracing_snapshot_instance(tr);
7896 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7897 struct trace_array *tr, struct ftrace_probe_ops *ops,
7900 struct ftrace_func_mapper *mapper = data;
7904 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7914 tracing_snapshot_instance(tr);
7918 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7919 struct ftrace_probe_ops *ops, void *data)
7921 struct ftrace_func_mapper *mapper = data;
7924 seq_printf(m, "%ps:", (void *)ip);
7926 seq_puts(m, "snapshot");
7929 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7932 seq_printf(m, ":count=%ld\n", *count);
7934 seq_puts(m, ":unlimited\n");
7940 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7941 unsigned long ip, void *init_data, void **data)
7943 struct ftrace_func_mapper *mapper = *data;
7946 mapper = allocate_ftrace_func_mapper();
7952 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7956 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7957 unsigned long ip, void *data)
7959 struct ftrace_func_mapper *mapper = data;
7964 free_ftrace_func_mapper(mapper, NULL);
7968 ftrace_func_mapper_remove_ip(mapper, ip);
7971 static struct ftrace_probe_ops snapshot_probe_ops = {
7972 .func = ftrace_snapshot,
7973 .print = ftrace_snapshot_print,
7976 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7977 .func = ftrace_count_snapshot,
7978 .print = ftrace_snapshot_print,
7979 .init = ftrace_snapshot_init,
7980 .free = ftrace_snapshot_free,
7984 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7985 char *glob, char *cmd, char *param, int enable)
7987 struct ftrace_probe_ops *ops;
7988 void *count = (void *)-1;
7995 /* hash funcs only work with set_ftrace_filter */
7999 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
8002 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
8007 number = strsep(¶m, ":");
8009 if (!strlen(number))
8013 * We use the callback data field (which is a pointer)
8016 ret = kstrtoul(number, 0, (unsigned long *)&count);
8021 ret = tracing_alloc_snapshot_instance(tr);
8025 ret = register_ftrace_function_probe(glob, tr, ops, count);
8028 return ret < 0 ? ret : 0;
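/*
 * The callback above implements the "snapshot" function command for
 * set_ftrace_filter.  Illustrative usage (the function name is made up):
 *
 *	echo 'some_function:snapshot' > set_ftrace_filter
 *	echo 'some_function:snapshot:5' > set_ftrace_filter
 *	echo '!some_function:snapshot' > set_ftrace_filter
 *
 * The first form snapshots on every hit of the function, the second
 * only for the first five hits (the ":count" parameter parsed above),
 * and the '!' form removes the probe again.
 */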
8031 static struct ftrace_func_command ftrace_snapshot_cmd = {
8033 .func = ftrace_trace_snapshot_callback,
8036 static __init int register_snapshot_cmd(void)
8038 return register_ftrace_command(&ftrace_snapshot_cmd);
8041 static inline __init int register_snapshot_cmd(void) { return 0; }
8042 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
8044 static struct dentry *tracing_get_dentry(struct trace_array *tr)
8046 if (WARN_ON(!tr->dir))
8047 return ERR_PTR(-ENODEV);
8049 /* Top directory uses NULL as the parent */
8050 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
8053 /* All sub buffers have a descriptor */
8057 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
8059 struct dentry *d_tracer;
8062 return tr->percpu_dir;
8064 d_tracer = tracing_get_dentry(tr);
8065 if (IS_ERR(d_tracer))
8068 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
8070 MEM_FAIL(!tr->percpu_dir,
8071 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
8073 return tr->percpu_dir;
8076 static struct dentry *
8077 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
8078 void *data, long cpu, const struct file_operations *fops)
8080 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
8082 if (ret) /* See tracing_get_cpu() */
8083 d_inode(ret)->i_cdev = (void *)(cpu + 1);
8088 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
8090 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
8091 struct dentry *d_cpu;
8092 char cpu_dir[30]; /* 30 characters should be more than enough */
8097 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8098 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8100 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8104 /* per cpu trace_pipe */
8105 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
8106 tr, cpu, &tracing_pipe_fops);
8109 trace_create_cpu_file("trace", 0644, d_cpu,
8110 tr, cpu, &tracing_fops);
8112 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
8113 tr, cpu, &tracing_buffers_fops);
8115 trace_create_cpu_file("stats", 0444, d_cpu,
8116 tr, cpu, &tracing_stats_fops);
8118 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
8119 tr, cpu, &tracing_entries_fops);
8121 #ifdef CONFIG_TRACER_SNAPSHOT
8122 trace_create_cpu_file("snapshot", 0644, d_cpu,
8123 tr, cpu, &snapshot_fops);
8125 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
8126 tr, cpu, &snapshot_raw_fops);
8130 #ifdef CONFIG_FTRACE_SELFTEST
8131 /* Let selftest have access to static functions in this file */
8132 #include "trace_selftest.c"
8136 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
8139 struct trace_option_dentry *topt = filp->private_data;
8142 if (topt->flags->val & topt->opt->bit)
8147 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8151 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
8154 struct trace_option_dentry *topt = filp->private_data;
8158 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8162 if (val != 0 && val != 1)
8165 if (!!(topt->flags->val & topt->opt->bit) != val) {
8166 mutex_lock(&trace_types_lock);
8167 ret = __set_tracer_option(topt->tr, topt->flags,
8169 mutex_unlock(&trace_types_lock);
8180 static const struct file_operations trace_options_fops = {
8181 .open = tracing_open_generic,
8182 .read = trace_options_read,
8183 .write = trace_options_write,
8184 .llseek = generic_file_llseek,
8188 * In order to pass in both the trace_array descriptor as well as the index
8189 * to the flag that the trace option file represents, the trace_array
8190 * has a character array of trace_flags_index[], which holds the index
8191 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
8192 * The address of this character array is passed to the flag option file
8193 * read/write callbacks.
8195 * In order to extract both the index and the trace_array descriptor,
8196 * get_tr_index() uses the following algorithm.
8200 * As the pointer itself contains the address of the index (remember
8203 * Then to get the trace_array descriptor, by subtracting that index
8204 * from the ptr, we get to the start of the index itself.
8206 * ptr - idx == &index[0]
8208 * Then a simple container_of() from that pointer gets us to the
8209 * trace_array descriptor.
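 *
 * As a concrete, purely illustrative example: if @data points at
 * tr->trace_flags_index[3], then *(unsigned char *)data == 3, and
 * (data - 3) is &tr->trace_flags_index[0], from which container_of()
 * recovers the enclosing trace_array.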
8211 static void get_tr_index(void *data, struct trace_array **ptr,
8212 unsigned int *pindex)
8214 *pindex = *(unsigned char *)data;
8216 *ptr = container_of(data - *pindex, struct trace_array,
8221 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
8224 void *tr_index = filp->private_data;
8225 struct trace_array *tr;
8229 get_tr_index(tr_index, &tr, &index);
8231 if (tr->trace_flags & (1 << index))
8236 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
8240 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
8243 void *tr_index = filp->private_data;
8244 struct trace_array *tr;
8249 get_tr_index(tr_index, &tr, &index);
8251 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8255 if (val != 0 && val != 1)
8258 mutex_lock(&event_mutex);
8259 mutex_lock(&trace_types_lock);
8260 ret = set_tracer_flag(tr, 1 << index, val);
8261 mutex_unlock(&trace_types_lock);
8262 mutex_unlock(&event_mutex);
8272 static const struct file_operations trace_options_core_fops = {
8273 .open = tracing_open_generic,
8274 .read = trace_options_core_read,
8275 .write = trace_options_core_write,
8276 .llseek = generic_file_llseek,
8279 struct dentry *trace_create_file(const char *name,
8281 struct dentry *parent,
8283 const struct file_operations *fops)
8287 ret = tracefs_create_file(name, mode, parent, data, fops);
8289 pr_warn("Could not create tracefs '%s' entry\n", name);
8295 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
8297 struct dentry *d_tracer;
8302 d_tracer = tracing_get_dentry(tr);
8303 if (IS_ERR(d_tracer))
8306 tr->options = tracefs_create_dir("options", d_tracer);
8308 pr_warn("Could not create tracefs directory 'options'\n");
8316 create_trace_option_file(struct trace_array *tr,
8317 struct trace_option_dentry *topt,
8318 struct tracer_flags *flags,
8319 struct tracer_opt *opt)
8321 struct dentry *t_options;
8323 t_options = trace_options_init_dentry(tr);
8327 topt->flags = flags;
8331 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
8332 &trace_options_fops);
8337 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
8339 struct trace_option_dentry *topts;
8340 struct trace_options *tr_topts;
8341 struct tracer_flags *flags;
8342 struct tracer_opt *opts;
8349 flags = tracer->flags;
8351 if (!flags || !flags->opts)
8355 * If this is an instance, only create flags for tracers
8356 * the instance may have.
8358 if (!trace_ok_for_array(tracer, tr))
8361 for (i = 0; i < tr->nr_topts; i++) {
8362 /* Make sure there's no duplicate flags. */
8363 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
8369 for (cnt = 0; opts[cnt].name; cnt++)
8372 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
8376 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
8383 tr->topts = tr_topts;
8384 tr->topts[tr->nr_topts].tracer = tracer;
8385 tr->topts[tr->nr_topts].topts = topts;
8388 for (cnt = 0; opts[cnt].name; cnt++) {
8389 create_trace_option_file(tr, &topts[cnt], flags,
8391 MEM_FAIL(topts[cnt].entry == NULL,
8392 "Failed to create trace option: %s",
8397 static struct dentry *
8398 create_trace_option_core_file(struct trace_array *tr,
8399 const char *option, long index)
8401 struct dentry *t_options;
8403 t_options = trace_options_init_dentry(tr);
8407 return trace_create_file(option, 0644, t_options,
8408 (void *)&tr->trace_flags_index[index],
8409 &trace_options_core_fops);
8412 static void create_trace_options_dir(struct trace_array *tr)
8414 struct dentry *t_options;
8415 bool top_level = tr == &global_trace;
8418 t_options = trace_options_init_dentry(tr);
8422 for (i = 0; trace_options[i]; i++) {
8424 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
8425 create_trace_option_core_file(tr, trace_options[i], i);
8430 rb_simple_read(struct file *filp, char __user *ubuf,
8431 size_t cnt, loff_t *ppos)
8433 struct trace_array *tr = filp->private_data;
8437 r = tracer_tracing_is_on(tr);
8438 r = sprintf(buf, "%d\n", r);
8440 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8444 rb_simple_write(struct file *filp, const char __user *ubuf,
8445 size_t cnt, loff_t *ppos)
8447 struct trace_array *tr = filp->private_data;
8448 struct trace_buffer *buffer = tr->array_buffer.buffer;
8452 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8457 mutex_lock(&trace_types_lock);
8458 if (!!val == tracer_tracing_is_on(tr)) {
8459 val = 0; /* do nothing */
8461 tracer_tracing_on(tr);
8462 if (tr->current_trace->start)
8463 tr->current_trace->start(tr);
8465 tracer_tracing_off(tr);
8466 if (tr->current_trace->stop)
8467 tr->current_trace->stop(tr);
8469 mutex_unlock(&trace_types_lock);
8477 static const struct file_operations rb_simple_fops = {
8478 .open = tracing_open_generic_tr,
8479 .read = rb_simple_read,
8480 .write = rb_simple_write,
8481 .release = tracing_release_generic_tr,
8482 .llseek = default_llseek,
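/*
 * rb_simple_fops backs the per-instance "tracing_on" file created in
 * init_tracer_tracefs() below.  Typical usage from user space (the
 * tracefs mount point is an assumption):
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on    # stop recording
 *	echo 1 > /sys/kernel/tracing/tracing_on    # resume recording
 *
 * rb_simple_write() also invokes the current tracer's ->start() or
 * ->stop() callback when the state actually changes.
 */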
8486 buffer_percent_read(struct file *filp, char __user *ubuf,
8487 size_t cnt, loff_t *ppos)
8489 struct trace_array *tr = filp->private_data;
8493 r = tr->buffer_percent;
8494 r = sprintf(buf, "%d\n", r);
8496 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
8500 buffer_percent_write(struct file *filp, const char __user *ubuf,
8501 size_t cnt, loff_t *ppos)
8503 struct trace_array *tr = filp->private_data;
8507 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
8517 tr->buffer_percent = val;
8524 static const struct file_operations buffer_percent_fops = {
8525 .open = tracing_open_generic_tr,
8526 .read = buffer_percent_read,
8527 .write = buffer_percent_write,
8528 .release = tracing_release_generic_tr,
8529 .llseek = default_llseek,
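/*
 * "buffer_percent" controls how full the ring buffer must be before
 * waiting readers are woken: the value is passed as the "full" argument
 * to wait_on_pipe() in tracing_buffers_splice_read() above, and
 * init_tracer_tracefs() below defaults it to 50.  0 means wake as soon
 * as any data is present, 100 means wait until the buffer is full.
 */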
8532 static struct dentry *trace_instance_dir;
8535 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
8538 allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size)
8540 enum ring_buffer_flags rb_flags;
8542 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
8546 buf->buffer = ring_buffer_alloc(size, rb_flags);
8550 buf->data = alloc_percpu(struct trace_array_cpu);
8552 ring_buffer_free(buf->buffer);
8557 /* Allocate the first page for all buffers */
8558 set_buffer_entries(&tr->array_buffer,
8559 ring_buffer_size(tr->array_buffer.buffer, 0));
8564 static int allocate_trace_buffers(struct trace_array *tr, int size)
8568 ret = allocate_trace_buffer(tr, &tr->array_buffer, size);
8572 #ifdef CONFIG_TRACER_MAX_TRACE
8573 ret = allocate_trace_buffer(tr, &tr->max_buffer,
8574 allocate_snapshot ? size : 1);
8575 if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) {
8576 ring_buffer_free(tr->array_buffer.buffer);
8577 tr->array_buffer.buffer = NULL;
8578 free_percpu(tr->array_buffer.data);
8579 tr->array_buffer.data = NULL;
8582 tr->allocated_snapshot = allocate_snapshot;
8585 * Only the top level trace array gets its snapshot allocated
8586 * from the kernel command line.
8588 allocate_snapshot = false;
8594 static void free_trace_buffer(struct array_buffer *buf)
8597 ring_buffer_free(buf->buffer);
8599 free_percpu(buf->data);
8604 static void free_trace_buffers(struct trace_array *tr)
8609 free_trace_buffer(&tr->array_buffer);
8611 #ifdef CONFIG_TRACER_MAX_TRACE
8612 free_trace_buffer(&tr->max_buffer);
8616 static void init_trace_flags_index(struct trace_array *tr)
8620 /* Used by the trace options files */
8621 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
8622 tr->trace_flags_index[i] = i;
8625 static void __update_tracer_options(struct trace_array *tr)
8629 for (t = trace_types; t; t = t->next)
8630 add_tracer_options(tr, t);
8633 static void update_tracer_options(struct trace_array *tr)
8635 mutex_lock(&trace_types_lock);
8636 __update_tracer_options(tr);
8637 mutex_unlock(&trace_types_lock);
8640 /* Must have trace_types_lock held */
8641 struct trace_array *trace_array_find(const char *instance)
8643 struct trace_array *tr, *found = NULL;
8645 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8646 if (tr->name && strcmp(tr->name, instance) == 0) {
8655 struct trace_array *trace_array_find_get(const char *instance)
8657 struct trace_array *tr;
8659 mutex_lock(&trace_types_lock);
8660 tr = trace_array_find(instance);
8663 mutex_unlock(&trace_types_lock);
8668 static int trace_array_create_dir(struct trace_array *tr)
8672 tr->dir = tracefs_create_dir(tr->name, trace_instance_dir);
8676 ret = event_trace_add_tracer(tr->dir, tr);
8678 tracefs_remove(tr->dir);
8680 init_tracer_tracefs(tr, tr->dir);
8681 __update_tracer_options(tr);
8686 static struct trace_array *trace_array_create(const char *name)
8688 struct trace_array *tr;
8692 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
8694 return ERR_PTR(ret);
8696 tr->name = kstrdup(name, GFP_KERNEL);
8700 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
8703 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
8705 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
8707 raw_spin_lock_init(&tr->start_lock);
8709 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8711 tr->current_trace = &nop_trace;
8713 INIT_LIST_HEAD(&tr->systems);
8714 INIT_LIST_HEAD(&tr->events);
8715 INIT_LIST_HEAD(&tr->hist_vars);
8716 INIT_LIST_HEAD(&tr->err_log);
8718 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
8721 if (ftrace_allocate_ftrace_ops(tr) < 0)
8724 ftrace_init_trace_array(tr);
8726 init_trace_flags_index(tr);
8728 if (trace_instance_dir) {
8729 ret = trace_array_create_dir(tr);
8733 __trace_early_add_events(tr);
8735 list_add(&tr->list, &ftrace_trace_arrays);
8742 ftrace_free_ftrace_ops(tr);
8743 free_trace_buffers(tr);
8744 free_cpumask_var(tr->tracing_cpumask);
8748 return ERR_PTR(ret);
8751 static int instance_mkdir(const char *name)
8753 struct trace_array *tr;
8756 mutex_lock(&event_mutex);
8757 mutex_lock(&trace_types_lock);
8760 if (trace_array_find(name))
8763 tr = trace_array_create(name);
8765 ret = PTR_ERR_OR_ZERO(tr);
8768 mutex_unlock(&trace_types_lock);
8769 mutex_unlock(&event_mutex);
8774 * trace_array_get_by_name - Create/Lookup a trace array, given its name.
8775 * @name: The name of the trace array to be looked up/created.
8777 * Returns pointer to trace array with given name.
8778 * NULL, if it cannot be created.
8780 * NOTE: This function increments the reference counter associated with the
8781 * trace array returned. This makes sure it cannot be freed while in use.
8782 * Use trace_array_put() once the trace array is no longer needed.
8783 * If the trace_array is to be freed, trace_array_destroy() needs to
8784 * be called after the trace_array_put(), or simply let user space delete
8785 * it from the tracefs instances directory. But until the
8786 * trace_array_put() is called, user space can not delete it.
8789 struct trace_array *trace_array_get_by_name(const char *name)
8791 struct trace_array *tr;
8793 mutex_lock(&event_mutex);
8794 mutex_lock(&trace_types_lock);
8796 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8797 if (tr->name && strcmp(tr->name, name) == 0)
8801 tr = trace_array_create(name);
8809 mutex_unlock(&trace_types_lock);
8810 mutex_unlock(&event_mutex);
8813 EXPORT_SYMBOL_GPL(trace_array_get_by_name);
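/*
 * Illustrative sketch of how a module might use the instance API
 * exported above; the instance name is invented and error handling is
 * abbreviated:
 *
 *	struct trace_array *tr;
 *
 *	tr = trace_array_get_by_name("my_instance");
 *	if (!tr)
 *		return -ENOMEM;
 *	...
 *	trace_array_put(tr);
 *
 * and, only if the module also wants the instance removed,
 *
 *	trace_array_destroy(tr);
 *
 * which, as noted above, must come after the trace_array_put().
 */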
8815 static int __remove_instance(struct trace_array *tr)
8819 /* Reference counter for a newly created trace array = 1. */
8820 if (tr->ref > 1 || (tr->current_trace && tr->trace_ref))
8823 list_del(&tr->list);
8825 /* Disable all the flags that were enabled coming in */
8826 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
8827 if ((1 << i) & ZEROED_TRACE_FLAGS)
8828 set_tracer_flag(tr, 1 << i, 0);
8831 tracing_set_nop(tr);
8832 clear_ftrace_function_probes(tr);
8833 event_trace_del_tracer(tr);
8834 ftrace_clear_pids(tr);
8835 ftrace_destroy_function_files(tr);
8836 tracefs_remove(tr->dir);
8837 free_trace_buffers(tr);
8839 for (i = 0; i < tr->nr_topts; i++) {
8840 kfree(tr->topts[i].topts);
8844 free_cpumask_var(tr->tracing_cpumask);
8851 int trace_array_destroy(struct trace_array *this_tr)
8853 struct trace_array *tr;
8859 mutex_lock(&event_mutex);
8860 mutex_lock(&trace_types_lock);
8864 /* Making sure trace array exists before destroying it. */
8865 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8866 if (tr == this_tr) {
8867 ret = __remove_instance(tr);
8872 mutex_unlock(&trace_types_lock);
8873 mutex_unlock(&event_mutex);
8877 EXPORT_SYMBOL_GPL(trace_array_destroy);
8879 static int instance_rmdir(const char *name)
8881 struct trace_array *tr;
8884 mutex_lock(&event_mutex);
8885 mutex_lock(&trace_types_lock);
8888 tr = trace_array_find(name);
8890 ret = __remove_instance(tr);
8892 mutex_unlock(&trace_types_lock);
8893 mutex_unlock(&event_mutex);
8898 static __init void create_trace_instances(struct dentry *d_tracer)
8900 struct trace_array *tr;
8902 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
8905 if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n"))
8908 mutex_lock(&event_mutex);
8909 mutex_lock(&trace_types_lock);
8911 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
8914 if (MEM_FAIL(trace_array_create_dir(tr) < 0,
8915 "Failed to create instance directory\n"))
8919 mutex_unlock(&trace_types_lock);
8920 mutex_unlock(&event_mutex);
8924 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
8926 struct trace_event_file *file;
8929 trace_create_file("available_tracers", 0444, d_tracer,
8930 tr, &show_traces_fops);
8932 trace_create_file("current_tracer", 0644, d_tracer,
8933 tr, &set_tracer_fops);
8935 trace_create_file("tracing_cpumask", 0644, d_tracer,
8936 tr, &tracing_cpumask_fops);
8938 trace_create_file("trace_options", 0644, d_tracer,
8939 tr, &tracing_iter_fops);
8941 trace_create_file("trace", 0644, d_tracer,
8944 trace_create_file("trace_pipe", 0444, d_tracer,
8945 tr, &tracing_pipe_fops);
8947 trace_create_file("buffer_size_kb", 0644, d_tracer,
8948 tr, &tracing_entries_fops);
8950 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
8951 tr, &tracing_total_entries_fops);
8953 trace_create_file("free_buffer", 0200, d_tracer,
8954 tr, &tracing_free_buffer_fops);
8956 trace_create_file("trace_marker", 0220, d_tracer,
8957 tr, &tracing_mark_fops);
8959 file = __find_event_file(tr, "ftrace", "print");
8960 if (file && file->dir)
8961 trace_create_file("trigger", 0644, file->dir, file,
8962 &event_trigger_fops);
8963 tr->trace_marker_file = file;
8965 trace_create_file("trace_marker_raw", 0220, d_tracer,
8966 tr, &tracing_mark_raw_fops);
8968 trace_create_file("trace_clock", 0644, d_tracer, tr,
8971 trace_create_file("tracing_on", 0644, d_tracer,
8972 tr, &rb_simple_fops);
8974 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8975 &trace_time_stamp_mode_fops);
8977 tr->buffer_percent = 50;
8979 trace_create_file("buffer_percent", 0444, d_tracer,
8980 tr, &buffer_percent_fops);
8982 create_trace_options_dir(tr);
8984 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8985 trace_create_maxlat_file(tr, d_tracer);
8988 if (ftrace_create_function_files(tr, d_tracer))
8989 MEM_FAIL(1, "Could not allocate function filter files");
8991 #ifdef CONFIG_TRACER_SNAPSHOT
8992 trace_create_file("snapshot", 0644, d_tracer,
8993 tr, &snapshot_fops);
8996 trace_create_file("error_log", 0644, d_tracer,
8997 tr, &tracing_err_log_fops);
8999 for_each_tracing_cpu(cpu)
9000 tracing_init_tracefs_percpu(tr, cpu);
9002 ftrace_init_tracefs(tr, d_tracer);
9005 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ignore)
9007 struct vfsmount *mnt;
9008 struct file_system_type *type;
9011 * To maintain backward compatibility for tools that mount
9012 * debugfs to get to the tracing facility, tracefs is automatically
9013 * mounted to the debugfs/tracing directory.
9015 type = get_fs_type("tracefs");
9018 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
9019 put_filesystem(type);
9028 * tracing_init_dentry - initialize top level trace array
9030 * This is called when creating files or directories in the tracing
9031 * directory. It is called via fs_initcall() by any of the boot up code
9032 * and expects to return the dentry of the top level tracing directory.
9034 int tracing_init_dentry(void)
9036 struct trace_array *tr = &global_trace;
9038 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9039 pr_warn("Tracing disabled due to lockdown\n");
9043 /* The top level trace array uses NULL as parent */
9047 if (WARN_ON(!tracefs_initialized()))
9051 * As there may still be users that expect the tracing
9052 * files to exist in debugfs/tracing, we must automount
9053 * the tracefs file system there, so older tools still
9054 * work with the newer kernel.
9056 tr->dir = debugfs_create_automount("tracing", NULL,
9057 trace_automount, NULL);
9062 extern struct trace_eval_map *__start_ftrace_eval_maps[];
9063 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
9065 static void __init trace_eval_init(void)
9069 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
9070 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
9073 #ifdef CONFIG_MODULES
9074 static void trace_module_add_evals(struct module *mod)
9076 if (!mod->num_trace_evals)
9080 * Modules with bad taint do not have events created, do
9081 * not bother with enums either.
9083 if (trace_module_has_bad_taint(mod))
9086 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
9089 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
9090 static void trace_module_remove_evals(struct module *mod)
9092 union trace_eval_map_item *map;
9093 union trace_eval_map_item **last = &trace_eval_maps;
9095 if (!mod->num_trace_evals)
9098 mutex_lock(&trace_eval_mutex);
9100 map = trace_eval_maps;
9103 if (map->head.mod == mod)
9105 map = trace_eval_jmp_to_tail(map);
9106 last = &map->tail.next;
9107 map = map->tail.next;
9112 *last = trace_eval_jmp_to_tail(map)->tail.next;
9115 mutex_unlock(&trace_eval_mutex);
9118 static inline void trace_module_remove_evals(struct module *mod) { }
9119 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
9121 static int trace_module_notify(struct notifier_block *self,
9122 unsigned long val, void *data)
9124 struct module *mod = data;
9127 case MODULE_STATE_COMING:
9128 trace_module_add_evals(mod);
9130 case MODULE_STATE_GOING:
9131 trace_module_remove_evals(mod);
9138 static struct notifier_block trace_module_nb = {
9139 .notifier_call = trace_module_notify,
9142 #endif /* CONFIG_MODULES */
9144 static __init int tracer_init_tracefs(void)
9148 trace_access_lock_init();
9150 ret = tracing_init_dentry();
9156 init_tracer_tracefs(&global_trace, NULL);
9157 ftrace_init_tracefs_toplevel(&global_trace, NULL);
9159 trace_create_file("tracing_thresh", 0644, NULL,
9160 &global_trace, &tracing_thresh_fops);
9162 trace_create_file("README", 0444, NULL,
9163 NULL, &tracing_readme_fops);
9165 trace_create_file("saved_cmdlines", 0444, NULL,
9166 NULL, &tracing_saved_cmdlines_fops);
9168 trace_create_file("saved_cmdlines_size", 0644, NULL,
9169 NULL, &tracing_saved_cmdlines_size_fops);
9171 trace_create_file("saved_tgids", 0444, NULL,
9172 NULL, &tracing_saved_tgids_fops);
9176 trace_create_eval_file(NULL);
9178 #ifdef CONFIG_MODULES
9179 register_module_notifier(&trace_module_nb);
9182 #ifdef CONFIG_DYNAMIC_FTRACE
9183 trace_create_file("dyn_ftrace_total_info", 0444, NULL,
9184 NULL, &tracing_dyn_info_fops);
9187 create_trace_instances(NULL);
9189 update_tracer_options(&global_trace);
9194 static int trace_panic_handler(struct notifier_block *this,
9195 unsigned long event, void *unused)
9197 if (ftrace_dump_on_oops)
9198 ftrace_dump(ftrace_dump_on_oops);
9202 static struct notifier_block trace_panic_notifier = {
9203 .notifier_call = trace_panic_handler,
9205 .priority = 150 /* priority: INT_MAX >= x >= 0 */
9208 static int trace_die_handler(struct notifier_block *self,
9214 if (ftrace_dump_on_oops)
9215 ftrace_dump(ftrace_dump_on_oops);
9223 static struct notifier_block trace_die_notifier = {
9224 .notifier_call = trace_die_handler,
9229 * printk is set to max of 1024, we really don't need it that big.
9230 * Nothing should be printing 1000 characters anyway.
9232 #define TRACE_MAX_PRINT 1000
9235 * Define here KERN_TRACE so that we have one place to modify
9236 * it if we decide to change what log level the ftrace dump
9239 #define KERN_TRACE KERN_EMERG
9242 trace_printk_seq(struct trace_seq *s)
9244 /* Probably should print a warning here. */
9245 if (s->seq.len >= TRACE_MAX_PRINT)
9246 s->seq.len = TRACE_MAX_PRINT;
9249 * More paranoid code. Although the buffer size is set to
9250 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
9251 * an extra layer of protection.
9253 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
9254 s->seq.len = s->seq.size - 1;
9256 /* should already be NUL-terminated, but we are paranoid. */
9257 s->buffer[s->seq.len] = 0;
9259 printk(KERN_TRACE "%s", s->buffer);
9264 void trace_init_global_iter(struct trace_iterator *iter)
9266 iter->tr = &global_trace;
9267 iter->trace = iter->tr->current_trace;
9268 iter->cpu_file = RING_BUFFER_ALL_CPUS;
9269 iter->array_buffer = &global_trace.array_buffer;
9271 if (iter->trace && iter->trace->open)
9272 iter->trace->open(iter);
9274 /* Annotate start of buffers if we had overruns */
9275 if (ring_buffer_overruns(iter->array_buffer->buffer))
9276 iter->iter_flags |= TRACE_FILE_ANNOTATE;
9278 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
9279 if (trace_clocks[iter->tr->clock_id].in_ns)
9280 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
9283 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
9285 /* use static because iter can be a bit big for the stack */
9286 static struct trace_iterator iter;
9287 static atomic_t dump_running;
9288 struct trace_array *tr = &global_trace;
9289 unsigned int old_userobj;
9290 unsigned long flags;
9293 /* Only allow one dump user at a time. */
9294 if (atomic_inc_return(&dump_running) != 1) {
9295 atomic_dec(&dump_running);
9300 * Always turn off tracing when we dump.
9301 * We don't need to show trace output of what happens
9302 * between multiple crashes.
9304 * If the user does a sysrq-z, then they can re-enable
9305 * tracing with echo 1 > tracing_on.
9309 local_irq_save(flags);
9310 printk_nmi_direct_enter();
9312 /* Simulate the iterator */
9313 trace_init_global_iter(&iter);
9314 /* Can not use kmalloc for iter.temp */
9315 iter.temp = static_temp_buf;
9316 iter.temp_size = STATIC_TEMP_BUF_SIZE;
9318 for_each_tracing_cpu(cpu) {
9319 atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9322 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
9324 /* don't look at user memory in panic mode */
9325 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
9327 switch (oops_dump_mode) {
9329 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9332 iter.cpu_file = raw_smp_processor_id();
9337 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
9338 iter.cpu_file = RING_BUFFER_ALL_CPUS;
9341 printk(KERN_TRACE "Dumping ftrace buffer:\n");
9343 /* Did function tracer already get disabled? */
9344 if (ftrace_is_dead()) {
9345 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
9346 printk("# MAY BE MISSING FUNCTION EVENTS\n");
9350 * We need to stop all tracing on all CPUS to read
9351 * the next buffer. This is a bit expensive, but is
9352 * not done often. We fill all that we can read,
9353 * and then release the locks again.
9356 while (!trace_empty(&iter)) {
9359 printk(KERN_TRACE "---------------------------------\n");
9363 trace_iterator_reset(&iter);
9364 iter.iter_flags |= TRACE_FILE_LAT_FMT;
9366 if (trace_find_next_entry_inc(&iter) != NULL) {
9369 ret = print_trace_line(&iter);
9370 if (ret != TRACE_TYPE_NO_CONSUME)
9371 trace_consume(&iter);
9373 touch_nmi_watchdog();
9375 trace_printk_seq(&iter.seq);
9379 printk(KERN_TRACE " (ftrace buffer empty)\n");
9381 printk(KERN_TRACE "---------------------------------\n");
9384 tr->trace_flags |= old_userobj;
9386 for_each_tracing_cpu(cpu) {
9387 atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
9389 atomic_dec(&dump_running);
9390 printk_nmi_direct_exit();
9391 local_irq_restore(flags);
9393 EXPORT_SYMBOL_GPL(ftrace_dump);
9395 int trace_run_command(const char *buf, int (*createfn)(int, char **))
9402 argv = argv_split(GFP_KERNEL, buf, &argc);
9407 ret = createfn(argc, argv);
9414 #define WRITE_BUFSIZE 4096
9416 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
9417 size_t count, loff_t *ppos,
9418 int (*createfn)(int, char **))
9420 char *kbuf, *buf, *tmp;
9425 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
9429 while (done < count) {
9430 size = count - done;
9432 if (size >= WRITE_BUFSIZE)
9433 size = WRITE_BUFSIZE - 1;
9435 if (copy_from_user(kbuf, buffer + done, size)) {
9442 tmp = strchr(buf, '\n');
9445 size = tmp - buf + 1;
9448 if (done + size < count) {
9451 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
9452 pr_warn("Line length is too long: Should be less than %d\n",
9460 /* Remove comments */
9461 tmp = strchr(buf, '#');
9466 ret = trace_run_command(buf, createfn);
9471 } while (done < count);
9481 __init static int tracer_alloc_buffers(void)
9487 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9488 pr_warn("Tracing disabled due to lockdown\n");
9493 * Make sure we don't accidentally add more trace options
9494 * than we have bits for.
9496 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
9498 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
9501 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9502 goto out_free_buffer_mask;
9504 /* Only allocate trace_printk buffers if a trace_printk exists */
9505 if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
9506 /* Must be called before global_trace.buffer is allocated */
9507 trace_printk_init_buffers();
9509 /* To save memory, keep the ring buffer size to its minimum */
9510 if (ring_buffer_expanded)
9511 ring_buf_size = trace_buf_size;
9515 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
9516 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9518 raw_spin_lock_init(&global_trace.start_lock);
9521 * The prepare callback allocates some memory for the ring buffer. We
9522 * don't free the buffer if the CPU goes down. If we were to free
9523 * the buffer, then the user would lose any trace that was in the
9524 * buffer. The memory will be removed once the "instance" is removed.
9526 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
9527 "trace/RB:prepare", trace_rb_cpu_prepare,
9530 goto out_free_cpumask;
9531 /* Used for event triggers */
9533 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
9535 goto out_rm_hp_state;
9537 if (trace_create_savedcmd() < 0)
9538 goto out_free_temp_buffer;
9540 /* TODO: make the number of buffers hot pluggable with CPUS */
9541 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
9542 MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n");
9543 goto out_free_savedcmd;
9546 if (global_trace.buffer_disabled)
9549 if (trace_boot_clock) {
9550 ret = tracing_set_clock(&global_trace, trace_boot_clock);
9552 pr_warn("Trace clock %s not defined, going back to default\n",
9557 * register_tracer() might reference current_trace, so it
9558 * needs to be set before we register anything. This is
9559 * just a bootstrap of current_trace anyway.
9561 global_trace.current_trace = &nop_trace;
9563 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
9565 ftrace_init_global_array_ops(&global_trace);
9567 init_trace_flags_index(&global_trace);
9569 register_tracer(&nop_trace);
9571 /* Function tracing may start here (via kernel command line) */
9572 init_function_trace();
9574 /* All seems OK, enable tracing */
9575 tracing_disabled = 0;
9577 atomic_notifier_chain_register(&panic_notifier_list,
9578 &trace_panic_notifier);
9580 register_die_notifier(&trace_die_notifier);
9582 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
9584 INIT_LIST_HEAD(&global_trace.systems);
9585 INIT_LIST_HEAD(&global_trace.events);
9586 INIT_LIST_HEAD(&global_trace.hist_vars);
9587 INIT_LIST_HEAD(&global_trace.err_log);
9588 list_add(&global_trace.list, &ftrace_trace_arrays);
9590 apply_trace_boot_options();
9592 register_snapshot_cmd();
9597 free_saved_cmdlines_buffer(savedcmd);
9598 out_free_temp_buffer:
9599 ring_buffer_free(temp_buffer);
9601 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9603 free_cpumask_var(global_trace.tracing_cpumask);
9604 out_free_buffer_mask:
9605 free_cpumask_var(tracing_buffer_mask);
9610 void __init early_trace_init(void)
9612 if (tracepoint_printk) {
9613 tracepoint_print_iter =
9614 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
9615 if (MEM_FAIL(!tracepoint_print_iter,
9616 "Failed to allocate trace iterator\n"))
9617 tracepoint_printk = 0;
9619 static_key_enable(&tracepoint_printk_key.key);
9621 tracer_alloc_buffers();
9624 void __init trace_init(void)
9629 __init static int clear_boot_tracer(void)
9632 * The buffer holding the default bootup tracer name lives in an init
9633 * section. This function runs as a late initcall; if we did not
9634 * find the boot tracer, then clear it out, to prevent
9635 * later registration from accessing the buffer that is
9636 * about to be freed.
9638 if (!default_bootup_tracer)
9641 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
9642 default_bootup_tracer);
9643 default_bootup_tracer = NULL;
9648 fs_initcall(tracer_init_tracefs);
9649 late_initcall_sync(clear_boot_tracer);
9651 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
9652 __init static int tracing_set_default_clock(void)
9654 /* sched_clock_stable() is determined in late_initcall */
9655 if (!trace_boot_clock && !sched_clock_stable()) {
9656 if (security_locked_down(LOCKDOWN_TRACEFS)) {
9657 pr_warn("Can not set tracing clock due to lockdown\n");
9662 "Unstable clock detected, switching default tracing clock to \"global\"\n"
9663 "If you want to keep using the local clock, then add:\n"
9664 " \"trace_clock=local\"\n"
9665 "on the kernel command line\n");
9666 tracing_set_clock(&global_trace, "global");
9671 late_initcall_sync(tracing_set_default_clock);