ftrace: Add helper function ftrace_hash_move_and_update_ops()
[linux-2.6-block.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
76c813e2 28#include <linux/vmalloc.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
478409dd 43#include <linux/trace.h>
8bd75c77 44#include <linux/sched/rt.h>
86387f7e 45
bc0c38d1 46#include "trace.h"
f0868d1e 47#include "trace_output.h"
bc0c38d1 48
73c5162a
SR
49/*
50 * On boot up, the ring buffer is set to the minimum size, so that
51 * we do not waste memory on systems that are not using tracing.
52 */
55034cd6 53bool ring_buffer_expanded;
73c5162a 54
8e1b82e0
FW
55/*
56 * We need to change this state when a selftest is running.
ff32504f
FW
57 * A selftest will lurk into the ring-buffer to count the
58 * entries inserted during the selftest although some concurrent
5e1607a0 59 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
60 * at the same time, giving false positive or negative results.
61 */
8e1b82e0 62static bool __read_mostly tracing_selftest_running;
ff32504f 63
b2821ae6
SR
64/*
65 * If a tracer is running, we do not want to run SELFTEST.
66 */
020e5f85 67bool __read_mostly tracing_selftest_disabled;
b2821ae6 68
0daa2302
SRRH
69/* Pipe tracepoints to printk */
70struct trace_iterator *tracepoint_print_iter;
71int tracepoint_printk;
42391745 72static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
0daa2302 73
adf9f195
FW
74/* For tracers that don't implement custom flags */
75static struct tracer_opt dummy_tracer_opt[] = {
76 { }
77};
78
8c1a49ae
SRRH
79static int
80dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
81{
82 return 0;
83}
0f048701 84
7ffbd48d
SR
85/*
86 * To prevent the comm cache from being overwritten when no
87 * tracing is active, only save the comm when a trace event
88 * occurred.
89 */
90static DEFINE_PER_CPU(bool, trace_cmdline_save);
91
0f048701
SR
92/*
93 * Kill all tracing for good (never come back).
94 * It is initialized to 1 but will turn to zero if the initialization
95 * of the tracer is successful. But that is the only place that sets
96 * this back to zero.
97 */
4fd27358 98static int tracing_disabled = 1;
0f048701 99
955b61e5 100cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 101
944ac425
SR
102/*
103 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
104 *
105 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
106 * is set, then ftrace_dump is called. This will output the contents
107 * of the ftrace buffers to the console. This is very useful for
108 * capturing traces that lead to crashes and outputing it to a
109 * serial console.
110 *
111 * It is default off, but you can enable it with either specifying
112 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
113 * /proc/sys/kernel/ftrace_dump_on_oops
114 * Set 1 if you want to dump buffers of all CPUs
115 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 116 */
cecbca96
FW
117
118enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 119
de7edd31
SRRH
120/* When set, tracing will stop when a WARN*() is hit */
121int __disable_trace_on_warning;
122
9828413d
SRRH
123#ifdef CONFIG_TRACE_ENUM_MAP_FILE
124/* Map of enums to their values, for "enum_map" file */
125struct trace_enum_map_head {
126 struct module *mod;
127 unsigned long length;
128};
129
130union trace_enum_map_item;
131
132struct trace_enum_map_tail {
133 /*
134 * "end" is first and points to NULL as it must be different
135 * than "mod" or "enum_string"
136 */
137 union trace_enum_map_item *next;
138 const char *end; /* points to NULL */
139};
140
141static DEFINE_MUTEX(trace_enum_mutex);
142
143/*
144 * The trace_enum_maps are saved in an array with two extra elements,
145 * one at the beginning, and one at the end. The beginning item contains
146 * the count of the saved maps (head.length), and the module they
147 * belong to if not built in (head.mod). The ending item contains a
148 * pointer to the next array of saved enum_map items.
149 */
150union trace_enum_map_item {
151 struct trace_enum_map map;
152 struct trace_enum_map_head head;
153 struct trace_enum_map_tail tail;
154};
155
156static union trace_enum_map_item *trace_enum_maps;
157#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
158
607e2ea1 159static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 160
ee6c2c1b
LZ
161#define MAX_TRACER_SIZE 100
162static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 163static char *default_bootup_tracer;
d9e54076 164
55034cd6
SRRH
165static bool allocate_snapshot;
166
1beee96b 167static int __init set_cmdline_ftrace(char *str)
d9e54076 168{
67012ab1 169 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 170 default_bootup_tracer = bootup_tracer_buf;
73c5162a 171 /* We are using ftrace early, expand it */
55034cd6 172 ring_buffer_expanded = true;
d9e54076
PZ
173 return 1;
174}
1beee96b 175__setup("ftrace=", set_cmdline_ftrace);
d9e54076 176
944ac425
SR
177static int __init set_ftrace_dump_on_oops(char *str)
178{
cecbca96
FW
179 if (*str++ != '=' || !*str) {
180 ftrace_dump_on_oops = DUMP_ALL;
181 return 1;
182 }
183
184 if (!strcmp("orig_cpu", str)) {
185 ftrace_dump_on_oops = DUMP_ORIG;
186 return 1;
187 }
188
189 return 0;
944ac425
SR
190}
191__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 192
de7edd31
SRRH
193static int __init stop_trace_on_warning(char *str)
194{
933ff9f2
LCG
195 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
196 __disable_trace_on_warning = 1;
de7edd31
SRRH
197 return 1;
198}
933ff9f2 199__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 200
3209cff4 201static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
202{
203 allocate_snapshot = true;
204 /* We also need the main ring buffer expanded */
205 ring_buffer_expanded = true;
206 return 1;
207}
3209cff4 208__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 209
7bcfaf54
SR
210
211static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
7bcfaf54
SR
212
213static int __init set_trace_boot_options(char *str)
214{
67012ab1 215 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
216 return 0;
217}
218__setup("trace_options=", set_trace_boot_options);
219
e1e232ca
SR
220static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
221static char *trace_boot_clock __initdata;
222
223static int __init set_trace_boot_clock(char *str)
224{
225 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
226 trace_boot_clock = trace_boot_clock_buf;
227 return 0;
228}
229__setup("trace_clock=", set_trace_boot_clock);
230
0daa2302
SRRH
231static int __init set_tracepoint_printk(char *str)
232{
233 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
234 tracepoint_printk = 1;
235 return 1;
236}
237__setup("tp_printk", set_tracepoint_printk);
de7edd31 238
a5a1d1c2 239unsigned long long ns2usecs(u64 nsec)
bc0c38d1
SR
240{
241 nsec += 500;
242 do_div(nsec, 1000);
243 return nsec;
244}
245
983f938a
SRRH
246/* trace_flags holds trace_options default values */
247#define TRACE_DEFAULT_FLAGS \
248 (FUNCTION_DEFAULT_FLAGS | \
249 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
250 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
251 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
252 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
253
16270145
SRRH
254/* trace_options that are only supported by global_trace */
255#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
256 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
257
20550622
SRRH
258/* trace_flags that are default zero for instances */
259#define ZEROED_TRACE_FLAGS \
1e10486f 260 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
16270145 261
4fcdae83 262/*
67d04bb2
JF
263 * The global_trace is the descriptor that holds the top-level tracing
264 * buffers for the live tracing.
4fcdae83 265 */
983f938a
SRRH
266static struct trace_array global_trace = {
267 .trace_flags = TRACE_DEFAULT_FLAGS,
268};
bc0c38d1 269
ae63b31e 270LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 271
ff451961
SRRH
272int trace_array_get(struct trace_array *this_tr)
273{
274 struct trace_array *tr;
275 int ret = -ENODEV;
276
277 mutex_lock(&trace_types_lock);
278 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
279 if (tr == this_tr) {
280 tr->ref++;
281 ret = 0;
282 break;
283 }
284 }
285 mutex_unlock(&trace_types_lock);
286
287 return ret;
288}
289
290static void __trace_array_put(struct trace_array *this_tr)
291{
292 WARN_ON(!this_tr->ref);
293 this_tr->ref--;
294}
295
296void trace_array_put(struct trace_array *this_tr)
297{
298 mutex_lock(&trace_types_lock);
299 __trace_array_put(this_tr);
300 mutex_unlock(&trace_types_lock);
301}
302
2425bcb9 303int call_filter_check_discard(struct trace_event_call *call, void *rec,
f306cc82
TZ
304 struct ring_buffer *buffer,
305 struct ring_buffer_event *event)
306{
307 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
308 !filter_match_preds(call->filter, rec)) {
0fc1b09f 309 __trace_event_discard_commit(buffer, event);
f306cc82
TZ
310 return 1;
311 }
312
313 return 0;
eb02ce01
TZ
314}
315
76c813e2
SRRH
316void trace_free_pid_list(struct trace_pid_list *pid_list)
317{
318 vfree(pid_list->pids);
319 kfree(pid_list);
320}
321
d8275c45
SR
322/**
323 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
324 * @filtered_pids: The list of pids to check
325 * @search_pid: The PID to find in @filtered_pids
326 *
327 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
328 */
329bool
330trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
331{
332 /*
333 * If pid_max changed after filtered_pids was created, we
334 * by default ignore all pids greater than the previous pid_max.
335 */
336 if (search_pid >= filtered_pids->pid_max)
337 return false;
338
339 return test_bit(search_pid, filtered_pids->pids);
340}
341
342/**
343 * trace_ignore_this_task - should a task be ignored for tracing
344 * @filtered_pids: The list of pids to check
345 * @task: The task that should be ignored if not filtered
346 *
347 * Checks if @task should be traced or not from @filtered_pids.
348 * Returns true if @task should *NOT* be traced.
349 * Returns false if @task should be traced.
350 */
351bool
352trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
353{
354 /*
355 * Return false, because if filtered_pids does not exist,
356 * all pids are good to trace.
357 */
358 if (!filtered_pids)
359 return false;
360
361 return !trace_find_filtered_pid(filtered_pids, task->pid);
362}
363
364/**
365 * trace_pid_filter_add_remove - Add or remove a task from a pid_list
366 * @pid_list: The list to modify
367 * @self: The current task for fork or NULL for exit
368 * @task: The task to add or remove
369 *
370 * If adding a task, if @self is defined, the task is only added if @self
371 * is also included in @pid_list. This happens on fork and tasks should
372 * only be added when the parent is listed. If @self is NULL, then the
373 * @task pid will be removed from the list, which would happen on exit
374 * of a task.
375 */
376void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
377 struct task_struct *self,
378 struct task_struct *task)
379{
380 if (!pid_list)
381 return;
382
383 /* For forks, we only add if the forking task is listed */
384 if (self) {
385 if (!trace_find_filtered_pid(pid_list, self->pid))
386 return;
387 }
388
389 /* Sorry, but we don't support pid_max changing after setting */
390 if (task->pid >= pid_list->pid_max)
391 return;
392
393 /* "self" is set for forks, and NULL for exits */
394 if (self)
395 set_bit(task->pid, pid_list->pids);
396 else
397 clear_bit(task->pid, pid_list->pids);
398}
399
5cc8976b
SRRH
400/**
401 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
402 * @pid_list: The pid list to show
403 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
404 * @pos: The position of the file
405 *
406 * This is used by the seq_file "next" operation to iterate the pids
407 * listed in a trace_pid_list structure.
408 *
409 * Returns the pid+1 as we want to display pid of zero, but NULL would
410 * stop the iteration.
411 */
412void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
413{
414 unsigned long pid = (unsigned long)v;
415
416 (*pos)++;
417
418 /* pid already is +1 of the actual prevous bit */
419 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
420
421 /* Return pid + 1 to allow zero to be represented */
422 if (pid < pid_list->pid_max)
423 return (void *)(pid + 1);
424
425 return NULL;
426}
427
428/**
429 * trace_pid_start - Used for seq_file to start reading pid lists
430 * @pid_list: The pid list to show
431 * @pos: The position of the file
432 *
433 * This is used by seq_file "start" operation to start the iteration
434 * of listing pids.
435 *
436 * Returns the pid+1 as we want to display pid of zero, but NULL would
437 * stop the iteration.
438 */
439void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
440{
441 unsigned long pid;
442 loff_t l = 0;
443
444 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
445 if (pid >= pid_list->pid_max)
446 return NULL;
447
448 /* Return pid + 1 so that zero can be the exit value */
449 for (pid++; pid && l < *pos;
450 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
451 ;
452 return (void *)pid;
453}
454
455/**
456 * trace_pid_show - show the current pid in seq_file processing
457 * @m: The seq_file structure to write into
458 * @v: A void pointer of the pid (+1) value to display
459 *
460 * Can be directly used by seq_file operations to display the current
461 * pid value.
462 */
463int trace_pid_show(struct seq_file *m, void *v)
464{
465 unsigned long pid = (unsigned long)v - 1;
466
467 seq_printf(m, "%lu\n", pid);
468 return 0;
469}
470
76c813e2
SRRH
471/* 128 should be much more than enough */
472#define PID_BUF_SIZE 127
473
474int trace_pid_write(struct trace_pid_list *filtered_pids,
475 struct trace_pid_list **new_pid_list,
476 const char __user *ubuf, size_t cnt)
477{
478 struct trace_pid_list *pid_list;
479 struct trace_parser parser;
480 unsigned long val;
481 int nr_pids = 0;
482 ssize_t read = 0;
483 ssize_t ret = 0;
484 loff_t pos;
485 pid_t pid;
486
487 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
488 return -ENOMEM;
489
490 /*
491 * Always recreate a new array. The write is an all or nothing
492 * operation. Always create a new array when adding new pids by
493 * the user. If the operation fails, then the current list is
494 * not modified.
495 */
496 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
497 if (!pid_list)
498 return -ENOMEM;
499
500 pid_list->pid_max = READ_ONCE(pid_max);
501
502 /* Only truncating will shrink pid_max */
503 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
504 pid_list->pid_max = filtered_pids->pid_max;
505
506 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
507 if (!pid_list->pids) {
508 kfree(pid_list);
509 return -ENOMEM;
510 }
511
512 if (filtered_pids) {
513 /* copy the current bits to the new max */
67f20b08
WY
514 for_each_set_bit(pid, filtered_pids->pids,
515 filtered_pids->pid_max) {
76c813e2 516 set_bit(pid, pid_list->pids);
76c813e2
SRRH
517 nr_pids++;
518 }
519 }
520
521 while (cnt > 0) {
522
523 pos = 0;
524
525 ret = trace_get_user(&parser, ubuf, cnt, &pos);
526 if (ret < 0 || !trace_parser_loaded(&parser))
527 break;
528
529 read += ret;
530 ubuf += ret;
531 cnt -= ret;
532
533 parser.buffer[parser.idx] = 0;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566}
567
a5a1d1c2 568static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
569{
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
9457158b 573 if (!buf->buffer)
37886f6a
SR
574 return trace_clock_local();
575
9457158b
AL
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
578
579 return ts;
580}
bc0c38d1 581
a5a1d1c2 582u64 ftrace_now(int cpu)
9457158b
AL
583{
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585}
586
10246fa3
SRRH
587/**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
9036990d
SR
596int tracing_is_enabled(void)
597{
10246fa3
SRRH
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
9036990d
SR
605}
606
4fcdae83 607/*
3928a8a2
SR
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
3f5a54e3
SR
611 *
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it will be much appreciated
614 * to not have to wait for all that output. Anyway this can be
615 * boot time and run time configurable.
4fcdae83 616 */
3928a8a2 617#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 618
3928a8a2 619static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 620
4fcdae83 621/* trace_types holds a link list of available tracers. */
bc0c38d1 622static struct tracer *trace_types __read_mostly;
4fcdae83 623
4fcdae83
SR
624/*
625 * trace_types_lock is used to protect the trace_types list.
4fcdae83 626 */
a8227415 627DEFINE_MUTEX(trace_types_lock);
4fcdae83 628
7e53bd42
LJ
629/*
630 * serialize the access of the ring buffer
631 *
632 * ring buffer serializes readers, but it is low level protection.
633 * The validity of the events (which returns by ring_buffer_peek() ..etc)
634 * are not protected by ring buffer.
635 *
636 * The content of events may become garbage if we allow other process consumes
637 * these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not reader page) in ring buffer, and this page will be rewrited
640 * by events producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to system.
643 *
644 * These primitives allow multi process access to different cpu ring buffer
645 * concurrently.
646 *
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multi read-only access are also serialized.
649 */
650
651#ifdef CONFIG_SMP
652static DECLARE_RWSEM(all_cpu_access_lock);
653static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655static inline void trace_access_lock(int cpu)
656{
ae3b5093 657 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
ae3b5093 663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669}
670
671static inline void trace_access_unlock(int cpu)
672{
ae3b5093 673 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679}
680
681static inline void trace_access_lock_init(void)
682{
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687}
688
689#else
690
691static DEFINE_MUTEX(access_lock);
692
693static inline void trace_access_lock(int cpu)
694{
695 (void)cpu;
696 mutex_lock(&access_lock);
697}
698
699static inline void trace_access_unlock(int cpu)
700{
701 (void)cpu;
702 mutex_unlock(&access_lock);
703}
704
705static inline void trace_access_lock_init(void)
706{
707}
708
709#endif
710
d78a4614
SRRH
711#ifdef CONFIG_STACKTRACE
712static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
2d34f489
SRRH
715static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
73dddbb5
SRRH
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
ca475e83 719
d78a4614
SRRH
720#else
721static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724{
725}
2d34f489
SRRH
726static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
73dddbb5
SRRH
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
ca475e83
SRRH
730{
731}
732
d78a4614
SRRH
733#endif
734
3e9a8aad
SRRH
735static __always_inline void
736trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738{
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743}
744
745static __always_inline struct ring_buffer_event *
746__trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750{
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758}
759
5280bcef 760static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
761{
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775}
776
499e5470
SR
777/**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783void tracing_on(void)
784{
10246fa3 785 tracer_tracing_on(&global_trace);
499e5470
SR
786}
787EXPORT_SYMBOL_GPL(tracing_on);
788
52ffabe3
SRRH
789
790static __always_inline void
791__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792{
793 __this_cpu_write(trace_cmdline_save, true);
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803}
804
09ae7234
SRRH
805/**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811int __trace_puts(unsigned long ip, const char *str, int size)
812{
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
8abfb872
J
818 int pc;
819
983f938a 820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
821 return 0;
822
8abfb872 823 pc = preempt_count();
09ae7234 824
3132e107
SRRH
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
09ae7234
SRRH
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
09ae7234
SRRH
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
2d34f489 850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
851
852 return size;
853}
854EXPORT_SYMBOL_GPL(__trace_puts);
855
856/**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861int __trace_bputs(unsigned long ip, const char *str)
862{
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
8abfb872
J
868 int pc;
869
983f938a 870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
f0160a5a
J
871 return 0;
872
8abfb872 873 pc = preempt_count();
09ae7234 874
3132e107
SRRH
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
09ae7234
SRRH
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
3e9a8aad
SRRH
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
09ae7234
SRRH
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
2d34f489 890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
09ae7234
SRRH
891
892 return 1;
893}
894EXPORT_SYMBOL_GPL(__trace_bputs);
895
ad909e21
SRRH
896#ifdef CONFIG_TRACER_SNAPSHOT
897/**
898 * trace_snapshot - take a snapshot of the current buffer.
899 *
900 * This causes a swap between the snapshot buffer and the current live
901 * tracing buffer. You can use this to take snapshots of the live
902 * trace when some condition is triggered, but continue to trace.
903 *
904 * Note, make sure to allocate the snapshot with either
905 * a tracing_snapshot_alloc(), or by doing it manually
906 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
907 *
908 * If the snapshot buffer is not allocated, it will stop tracing.
909 * Basically making a permanent snapshot.
910 */
911void tracing_snapshot(void)
912{
913 struct trace_array *tr = &global_trace;
914 struct tracer *tracer = tr->current_trace;
915 unsigned long flags;
916
1b22e382
SRRH
917 if (in_nmi()) {
918 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
919 internal_trace_puts("*** snapshot is being ignored ***\n");
920 return;
921 }
922
ad909e21 923 if (!tr->allocated_snapshot) {
ca268da6
SRRH
924 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
925 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
926 tracing_off();
927 return;
928 }
929
930 /* Note, snapshot can not be used when the tracer uses it */
931 if (tracer->use_max_tr) {
ca268da6
SRRH
932 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
933 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
934 return;
935 }
936
937 local_irq_save(flags);
938 update_max_tr(tr, current, smp_processor_id());
939 local_irq_restore(flags);
940}
1b22e382 941EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
942
943static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
944 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
945static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
946
947static int alloc_snapshot(struct trace_array *tr)
948{
949 int ret;
950
951 if (!tr->allocated_snapshot) {
952
953 /* allocate spare buffer */
954 ret = resize_buffer_duplicate_size(&tr->max_buffer,
955 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
956 if (ret < 0)
957 return ret;
958
959 tr->allocated_snapshot = true;
960 }
961
962 return 0;
963}
964
ad1438a0 965static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
966{
967 /*
968 * We don't free the ring buffer. instead, resize it because
969 * The max_tr ring buffer has some state (e.g. ring->clock) and
970 * we want preserve it.
971 */
972 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
973 set_buffer_entries(&tr->max_buffer, 1);
974 tracing_reset_online_cpus(&tr->max_buffer);
975 tr->allocated_snapshot = false;
976}
ad909e21 977
93e31ffb
TZ
978/**
979 * tracing_alloc_snapshot - allocate snapshot buffer.
980 *
981 * This only allocates the snapshot buffer if it isn't already
982 * allocated - it doesn't also take a snapshot.
983 *
984 * This is meant to be used in cases where the snapshot buffer needs
985 * to be set up for events that can't sleep but need to be able to
986 * trigger a snapshot.
987 */
988int tracing_alloc_snapshot(void)
989{
990 struct trace_array *tr = &global_trace;
991 int ret;
992
993 ret = alloc_snapshot(tr);
994 WARN_ON(ret < 0);
995
996 return ret;
997}
998EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
999
ad909e21
SRRH
1000/**
1001 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
1002 *
1003 * This is similar to trace_snapshot(), but it will allocate the
1004 * snapshot buffer if it isn't already allocated. Use this only
1005 * where it is safe to sleep, as the allocation may sleep.
1006 *
1007 * This causes a swap between the snapshot buffer and the current live
1008 * tracing buffer. You can use this to take snapshots of the live
1009 * trace when some condition is triggered, but continue to trace.
1010 */
1011void tracing_snapshot_alloc(void)
1012{
ad909e21
SRRH
1013 int ret;
1014
93e31ffb
TZ
1015 ret = tracing_alloc_snapshot();
1016 if (ret < 0)
3209cff4 1017 return;
ad909e21
SRRH
1018
1019 tracing_snapshot();
1020}
1b22e382 1021EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1022#else
1023void tracing_snapshot(void)
1024{
1025 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1026}
1b22e382 1027EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
1028int tracing_alloc_snapshot(void)
1029{
1030 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1031 return -ENODEV;
1032}
1033EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
1034void tracing_snapshot_alloc(void)
1035{
1036 /* Give warning */
1037 tracing_snapshot();
1038}
1b22e382 1039EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
1040#endif /* CONFIG_TRACER_SNAPSHOT */
1041
5280bcef 1042static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
1043{
1044 if (tr->trace_buffer.buffer)
1045 ring_buffer_record_off(tr->trace_buffer.buffer);
1046 /*
1047 * This flag is looked at when buffers haven't been allocated
1048 * yet, or by some tracers (like irqsoff), that just want to
1049 * know if the ring buffer has been disabled, but it can handle
1050 * races of where it gets disabled but we still do a record.
1051 * As the check is in the fast path of the tracers, it is more
1052 * important to be fast than accurate.
1053 */
1054 tr->buffer_disabled = 1;
1055 /* Make the flag seen by readers */
1056 smp_wmb();
1057}
1058
499e5470
SR
1059/**
1060 * tracing_off - turn off tracing buffers
1061 *
1062 * This function stops the tracing buffers from recording data.
1063 * It does not disable any overhead the tracers themselves may
1064 * be causing. This function simply causes all recording to
1065 * the ring buffers to fail.
1066 */
1067void tracing_off(void)
1068{
10246fa3 1069 tracer_tracing_off(&global_trace);
499e5470
SR
1070}
1071EXPORT_SYMBOL_GPL(tracing_off);
1072
de7edd31
SRRH
1073void disable_trace_on_warning(void)
1074{
1075 if (__disable_trace_on_warning)
1076 tracing_off();
1077}
1078
10246fa3
SRRH
1079/**
1080 * tracer_tracing_is_on - show real state of ring buffer enabled
1081 * @tr : the trace array to know if ring buffer is enabled
1082 *
1083 * Shows real state of the ring buffer if it is enabled or not.
1084 */
e7c15cd8 1085int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
1086{
1087 if (tr->trace_buffer.buffer)
1088 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1089 return !tr->buffer_disabled;
1090}
1091
499e5470
SR
1092/**
1093 * tracing_is_on - show state of ring buffers enabled
1094 */
1095int tracing_is_on(void)
1096{
10246fa3 1097 return tracer_tracing_is_on(&global_trace);
499e5470
SR
1098}
1099EXPORT_SYMBOL_GPL(tracing_is_on);
1100
3928a8a2 1101static int __init set_buf_size(char *str)
bc0c38d1 1102{
3928a8a2 1103 unsigned long buf_size;
c6caeeb1 1104
bc0c38d1
SR
1105 if (!str)
1106 return 0;
9d612bef 1107 buf_size = memparse(str, &str);
c6caeeb1 1108 /* nr_entries can not be zero */
9d612bef 1109 if (buf_size == 0)
c6caeeb1 1110 return 0;
3928a8a2 1111 trace_buf_size = buf_size;
bc0c38d1
SR
1112 return 1;
1113}
3928a8a2 1114__setup("trace_buf_size=", set_buf_size);
bc0c38d1 1115
0e950173
TB
1116static int __init set_tracing_thresh(char *str)
1117{
87abb3b1 1118 unsigned long threshold;
0e950173
TB
1119 int ret;
1120
1121 if (!str)
1122 return 0;
bcd83ea6 1123 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
1124 if (ret < 0)
1125 return 0;
87abb3b1 1126 tracing_thresh = threshold * 1000;
0e950173
TB
1127 return 1;
1128}
1129__setup("tracing_thresh=", set_tracing_thresh);
1130
57f50be1
SR
1131unsigned long nsecs_to_usecs(unsigned long nsecs)
1132{
1133 return nsecs / 1000;
1134}
1135
a3418a36
SRRH
1136/*
1137 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1138 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
1139 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1140 * of strings in the order that the enums were defined.
1141 */
1142#undef C
1143#define C(a, b) b
1144
4fcdae83 1145/* These must match the bit postions in trace_iterator_flags */
bc0c38d1 1146static const char *trace_options[] = {
a3418a36 1147 TRACE_FLAGS
bc0c38d1
SR
1148 NULL
1149};
1150
5079f326
Z
1151static struct {
1152 u64 (*func)(void);
1153 const char *name;
8be0709f 1154 int in_ns; /* is this clock in nanoseconds? */
5079f326 1155} trace_clocks[] = {
1b3e5c09
TG
1156 { trace_clock_local, "local", 1 },
1157 { trace_clock_global, "global", 1 },
1158 { trace_clock_counter, "counter", 0 },
e7fda6c4 1159 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
1160 { trace_clock, "perf", 1 },
1161 { ktime_get_mono_fast_ns, "mono", 1 },
aabfa5f2 1162 { ktime_get_raw_fast_ns, "mono_raw", 1 },
80ec3552 1163 { ktime_get_boot_fast_ns, "boot", 1 },
8cbd9cc6 1164 ARCH_TRACE_CLOCKS
5079f326
Z
1165};
1166
b63f39ea 1167/*
1168 * trace_parser_get_init - gets the buffer for trace parser
1169 */
1170int trace_parser_get_init(struct trace_parser *parser, int size)
1171{
1172 memset(parser, 0, sizeof(*parser));
1173
1174 parser->buffer = kmalloc(size, GFP_KERNEL);
1175 if (!parser->buffer)
1176 return 1;
1177
1178 parser->size = size;
1179 return 0;
1180}
1181
1182/*
1183 * trace_parser_put - frees the buffer for trace parser
1184 */
1185void trace_parser_put(struct trace_parser *parser)
1186{
1187 kfree(parser->buffer);
0e684b65 1188 parser->buffer = NULL;
b63f39ea 1189}
1190
1191/*
1192 * trace_get_user - reads the user input string separated by space
1193 * (matched by isspace(ch))
1194 *
1195 * For each string found the 'struct trace_parser' is updated,
1196 * and the function returns.
1197 *
1198 * Returns number of bytes read.
1199 *
1200 * See kernel/trace/trace.h for 'struct trace_parser' details.
1201 */
1202int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1203 size_t cnt, loff_t *ppos)
1204{
1205 char ch;
1206 size_t read = 0;
1207 ssize_t ret;
1208
1209 if (!*ppos)
1210 trace_parser_clear(parser);
1211
1212 ret = get_user(ch, ubuf++);
1213 if (ret)
1214 goto out;
1215
1216 read++;
1217 cnt--;
1218
1219 /*
1220 * The parser is not finished with the last write,
1221 * continue reading the user input without skipping spaces.
1222 */
1223 if (!parser->cont) {
1224 /* skip white space */
1225 while (cnt && isspace(ch)) {
1226 ret = get_user(ch, ubuf++);
1227 if (ret)
1228 goto out;
1229 read++;
1230 cnt--;
1231 }
1232
1233 /* only spaces were written */
1234 if (isspace(ch)) {
1235 *ppos += read;
1236 ret = read;
1237 goto out;
1238 }
1239
1240 parser->idx = 0;
1241 }
1242
1243 /* read the non-space input */
1244 while (cnt && !isspace(ch)) {
3c235a33 1245 if (parser->idx < parser->size - 1)
b63f39ea 1246 parser->buffer[parser->idx++] = ch;
1247 else {
1248 ret = -EINVAL;
1249 goto out;
1250 }
1251 ret = get_user(ch, ubuf++);
1252 if (ret)
1253 goto out;
1254 read++;
1255 cnt--;
1256 }
1257
1258 /* We either got finished input or we have to wait for another call. */
1259 if (isspace(ch)) {
1260 parser->buffer[parser->idx] = 0;
1261 parser->cont = false;
057db848 1262 } else if (parser->idx < parser->size - 1) {
b63f39ea 1263 parser->cont = true;
1264 parser->buffer[parser->idx++] = ch;
057db848
SR
1265 } else {
1266 ret = -EINVAL;
1267 goto out;
b63f39ea 1268 }
1269
1270 *ppos += read;
1271 ret = read;
1272
1273out:
1274 return ret;
1275}
1276
3a161d99 1277/* TODO add a seq_buf_to_buffer() */
b8b94265 1278static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
1279{
1280 int len;
3c56819b 1281
5ac48378 1282 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
1283 return -EBUSY;
1284
5ac48378 1285 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1286 if (cnt > len)
1287 cnt = len;
3a161d99 1288 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1289
3a161d99 1290 s->seq.readpos += cnt;
3c56819b
EGM
1291 return cnt;
1292}
1293
0e950173
TB
1294unsigned long __read_mostly tracing_thresh;
1295
5d4a9dba 1296#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1297/*
1298 * Copy the new maximum trace into the separate maximum-trace
1299 * structure. (this way the maximum trace is permanently saved,
1300 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1301 */
1302static void
1303__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1304{
12883efb
SRRH
1305 struct trace_buffer *trace_buf = &tr->trace_buffer;
1306 struct trace_buffer *max_buf = &tr->max_buffer;
1307 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1308 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1309
12883efb
SRRH
1310 max_buf->cpu = cpu;
1311 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1312
6d9b3fa5 1313 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1314 max_data->critical_start = data->critical_start;
1315 max_data->critical_end = data->critical_end;
5d4a9dba 1316
1acaa1b2 1317 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1318 max_data->pid = tsk->pid;
f17a5194
SRRH
1319 /*
1320 * If tsk == current, then use current_uid(), as that does not use
1321 * RCU. The irq tracer can be called out of RCU scope.
1322 */
1323 if (tsk == current)
1324 max_data->uid = current_uid();
1325 else
1326 max_data->uid = task_uid(tsk);
1327
8248ac05
SR
1328 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1329 max_data->policy = tsk->policy;
1330 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1331
1332 /* record this tasks comm */
1333 tracing_record_cmdline(tsk);
1334}
1335
4fcdae83
SR
1336/**
1337 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1338 * @tr: tracer
1339 * @tsk: the task with the latency
1340 * @cpu: The cpu that initiated the trace.
1341 *
1342 * Flip the buffers between the @tr and the max_tr and record information
1343 * about which task was the cause of this latency.
1344 */
e309b41d 1345void
bc0c38d1
SR
1346update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1347{
2721e72d 1348 struct ring_buffer *buf;
bc0c38d1 1349
2b6080f2 1350 if (tr->stop_count)
b8de7bd1
SR
1351 return;
1352
4c11d7ae 1353 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1354
45ad21ca 1355 if (!tr->allocated_snapshot) {
debdd57f 1356 /* Only the nop tracer should hit this when disabling */
2b6080f2 1357 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1358 return;
debdd57f 1359 }
34600f0e 1360
0b9b12c1 1361 arch_spin_lock(&tr->max_lock);
3928a8a2 1362
12883efb
SRRH
1363 buf = tr->trace_buffer.buffer;
1364 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1365 tr->max_buffer.buffer = buf;
3928a8a2 1366
bc0c38d1 1367 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1368 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1369}
1370
1371/**
1372 * update_max_tr_single - only copy one trace over, and reset the rest
1373 * @tr - tracer
1374 * @tsk - task with the latency
1375 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1376 *
1377 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1378 */
e309b41d 1379void
bc0c38d1
SR
1380update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1381{
3928a8a2 1382 int ret;
bc0c38d1 1383
2b6080f2 1384 if (tr->stop_count)
b8de7bd1
SR
1385 return;
1386
4c11d7ae 1387 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1388 if (!tr->allocated_snapshot) {
2930e04d 1389 /* Only the nop tracer should hit this when disabling */
9e8529af 1390 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1391 return;
2930e04d 1392 }
ef710e10 1393
0b9b12c1 1394 arch_spin_lock(&tr->max_lock);
bc0c38d1 1395
12883efb 1396 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1397
e8165dbb
SR
1398 if (ret == -EBUSY) {
1399 /*
1400 * We failed to swap the buffer due to a commit taking
1401 * place on this CPU. We fail to record, but we reset
1402 * the max trace buffer (no one writes directly to it)
1403 * and flag that it failed.
1404 */
12883efb 1405 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1406 "Failed to swap buffers due to commit in progress\n");
1407 }
1408
e8165dbb 1409 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1410
1411 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1412 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1413}
5d4a9dba 1414#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1415
e30f53aa 1416static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1417{
15693458
SRRH
1418 /* Iterators are static, they should be filled or empty */
1419 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1420 return 0;
0d5c6e1c 1421
e30f53aa
RV
1422 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1423 full);
0d5c6e1c
SR
1424}
1425
f4e781c0 1426#ifdef CONFIG_FTRACE_STARTUP_TEST
9afecfbb
SRV
1427static bool selftests_can_run;
1428
1429struct trace_selftests {
1430 struct list_head list;
1431 struct tracer *type;
1432};
1433
1434static LIST_HEAD(postponed_selftests);
1435
1436static int save_selftest(struct tracer *type)
1437{
1438 struct trace_selftests *selftest;
1439
1440 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1441 if (!selftest)
1442 return -ENOMEM;
1443
1444 selftest->type = type;
1445 list_add(&selftest->list, &postponed_selftests);
1446 return 0;
1447}
1448
f4e781c0
SRRH
1449static int run_tracer_selftest(struct tracer *type)
1450{
1451 struct trace_array *tr = &global_trace;
1452 struct tracer *saved_tracer = tr->current_trace;
1453 int ret;
0d5c6e1c 1454
f4e781c0
SRRH
1455 if (!type->selftest || tracing_selftest_disabled)
1456 return 0;
0d5c6e1c 1457
9afecfbb
SRV
1458 /*
1459 * If a tracer registers early in boot up (before scheduling is
1460 * initialized and such), then do not run its selftests yet.
1461 * Instead, run it a little later in the boot process.
1462 */
1463 if (!selftests_can_run)
1464 return save_selftest(type);
1465
0d5c6e1c 1466 /*
f4e781c0
SRRH
1467 * Run a selftest on this tracer.
1468 * Here we reset the trace buffer, and set the current
1469 * tracer to be this tracer. The tracer can then run some
1470 * internal tracing to verify that everything is in order.
1471 * If we fail, we do not register this tracer.
0d5c6e1c 1472 */
f4e781c0 1473 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1474
f4e781c0
SRRH
1475 tr->current_trace = type;
1476
1477#ifdef CONFIG_TRACER_MAX_TRACE
1478 if (type->use_max_tr) {
1479 /* If we expanded the buffers, make sure the max is expanded too */
1480 if (ring_buffer_expanded)
1481 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1482 RING_BUFFER_ALL_CPUS);
1483 tr->allocated_snapshot = true;
1484 }
1485#endif
1486
1487 /* the test is responsible for initializing and enabling */
1488 pr_info("Testing tracer %s: ", type->name);
1489 ret = type->selftest(type, tr);
1490 /* the test is responsible for resetting too */
1491 tr->current_trace = saved_tracer;
1492 if (ret) {
1493 printk(KERN_CONT "FAILED!\n");
1494 /* Add the warning after printing 'FAILED' */
1495 WARN_ON(1);
1496 return -1;
1497 }
1498 /* Only reset on passing, to avoid touching corrupted buffers */
1499 tracing_reset_online_cpus(&tr->trace_buffer);
1500
1501#ifdef CONFIG_TRACER_MAX_TRACE
1502 if (type->use_max_tr) {
1503 tr->allocated_snapshot = false;
0d5c6e1c 1504
f4e781c0
SRRH
1505 /* Shrink the max buffer again */
1506 if (ring_buffer_expanded)
1507 ring_buffer_resize(tr->max_buffer.buffer, 1,
1508 RING_BUFFER_ALL_CPUS);
1509 }
1510#endif
1511
1512 printk(KERN_CONT "PASSED\n");
1513 return 0;
1514}
9afecfbb
SRV
1515
1516static __init int init_trace_selftests(void)
1517{
1518 struct trace_selftests *p, *n;
1519 struct tracer *t, **last;
1520 int ret;
1521
1522 selftests_can_run = true;
1523
1524 mutex_lock(&trace_types_lock);
1525
1526 if (list_empty(&postponed_selftests))
1527 goto out;
1528
1529 pr_info("Running postponed tracer tests:\n");
1530
1531 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1532 ret = run_tracer_selftest(p->type);
1533 /* If the test fails, then warn and remove from available_tracers */
1534 if (ret < 0) {
1535 WARN(1, "tracer: %s failed selftest, disabling\n",
1536 p->type->name);
1537 last = &trace_types;
1538 for (t = trace_types; t; t = t->next) {
1539 if (t == p->type) {
1540 *last = t->next;
1541 break;
1542 }
1543 last = &t->next;
1544 }
1545 }
1546 list_del(&p->list);
1547 kfree(p);
1548 }
1549
1550 out:
1551 mutex_unlock(&trace_types_lock);
1552
1553 return 0;
1554}
1555early_initcall(init_trace_selftests);
f4e781c0
SRRH
1556#else
1557static inline int run_tracer_selftest(struct tracer *type)
1558{
1559 return 0;
0d5c6e1c 1560}
f4e781c0 1561#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1562
41d9c0be
SRRH
1563static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1564
a4d1e688
JW
1565static void __init apply_trace_boot_options(void);
1566
4fcdae83
SR
1567/**
1568 * register_tracer - register a tracer with the ftrace system.
1569 * @type - the plugin for the tracer
1570 *
1571 * Register a new plugin tracer.
1572 */
a4d1e688 1573int __init register_tracer(struct tracer *type)
bc0c38d1
SR
1574{
1575 struct tracer *t;
bc0c38d1
SR
1576 int ret = 0;
1577
1578 if (!type->name) {
1579 pr_info("Tracer must have a name\n");
1580 return -1;
1581 }
1582
24a461d5 1583 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1584 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1585 return -1;
1586 }
1587
bc0c38d1 1588 mutex_lock(&trace_types_lock);
86fa2f60 1589
8e1b82e0
FW
1590 tracing_selftest_running = true;
1591
bc0c38d1
SR
1592 for (t = trace_types; t; t = t->next) {
1593 if (strcmp(type->name, t->name) == 0) {
1594 /* already found */
ee6c2c1b 1595 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1596 type->name);
1597 ret = -1;
1598 goto out;
1599 }
1600 }
1601
adf9f195
FW
1602 if (!type->set_flag)
1603 type->set_flag = &dummy_set_flag;
d39cdd20
CH
1604 if (!type->flags) {
1605 /*allocate a dummy tracer_flags*/
1606 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
c8ca003b
CH
1607 if (!type->flags) {
1608 ret = -ENOMEM;
1609 goto out;
1610 }
d39cdd20
CH
1611 type->flags->val = 0;
1612 type->flags->opts = dummy_tracer_opt;
1613 } else
adf9f195
FW
1614 if (!type->flags->opts)
1615 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1616
d39cdd20
CH
1617 /* store the tracer for __set_tracer_option */
1618 type->flags->trace = type;
1619
f4e781c0
SRRH
1620 ret = run_tracer_selftest(type);
1621 if (ret < 0)
1622 goto out;
60a11774 1623
bc0c38d1
SR
1624 type->next = trace_types;
1625 trace_types = type;
41d9c0be 1626 add_tracer_options(&global_trace, type);
60a11774 1627
bc0c38d1 1628 out:
8e1b82e0 1629 tracing_selftest_running = false;
bc0c38d1
SR
1630 mutex_unlock(&trace_types_lock);
1631
dac74940
SR
1632 if (ret || !default_bootup_tracer)
1633 goto out_unlock;
1634
ee6c2c1b 1635 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1636 goto out_unlock;
1637
1638 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1639 /* Do we want this tracer to start on bootup? */
607e2ea1 1640 tracing_set_tracer(&global_trace, type->name);
dac74940 1641 default_bootup_tracer = NULL;
a4d1e688
JW
1642
1643 apply_trace_boot_options();
1644
dac74940 1645 /* disable other selftests, since this will break it. */
55034cd6 1646 tracing_selftest_disabled = true;
b2821ae6 1647#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1648 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1649 type->name);
b2821ae6 1650#endif
b2821ae6 1651
dac74940 1652 out_unlock:
bc0c38d1
SR
1653 return ret;
1654}
1655
12883efb 1656void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1657{
12883efb 1658 struct ring_buffer *buffer = buf->buffer;
f633903a 1659
a5416411
HT
1660 if (!buffer)
1661 return;
1662
f633903a
SR
1663 ring_buffer_record_disable(buffer);
1664
1665 /* Make sure all commits have finished */
1666 synchronize_sched();
68179686 1667 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1668
1669 ring_buffer_record_enable(buffer);
1670}
1671
12883efb 1672void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1673{
12883efb 1674 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1675 int cpu;
1676
a5416411
HT
1677 if (!buffer)
1678 return;
1679
621968cd
SR
1680 ring_buffer_record_disable(buffer);
1681
1682 /* Make sure all commits have finished */
1683 synchronize_sched();
1684
9457158b 1685 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1686
1687 for_each_online_cpu(cpu)
68179686 1688 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1689
1690 ring_buffer_record_enable(buffer);
213cc060
PE
1691}
1692
09d8091c 1693/* Must have trace_types_lock held */
873c642f 1694void tracing_reset_all_online_cpus(void)
9456f0fa 1695{
873c642f
SRRH
1696 struct trace_array *tr;
1697
873c642f 1698 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1699 tracing_reset_online_cpus(&tr->trace_buffer);
1700#ifdef CONFIG_TRACER_MAX_TRACE
1701 tracing_reset_online_cpus(&tr->max_buffer);
1702#endif
873c642f 1703 }
9456f0fa
SR
1704}
1705
939c7a4f 1706#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1707#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1708static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1709struct saved_cmdlines_buffer {
1710 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1711 unsigned *map_cmdline_to_pid;
1712 unsigned cmdline_num;
1713 int cmdline_idx;
1714 char *saved_cmdlines;
1715};
1716static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1717
25b0b44a 1718/* temporary disable recording */
4fd27358 1719static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1720
939c7a4f
YY
1721static inline char *get_saved_cmdlines(int idx)
1722{
1723 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1724}
1725
1726static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1727{
939c7a4f
YY
1728 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1729}
1730
1731static int allocate_cmdlines_buffer(unsigned int val,
1732 struct saved_cmdlines_buffer *s)
1733{
1734 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1735 GFP_KERNEL);
1736 if (!s->map_cmdline_to_pid)
1737 return -ENOMEM;
1738
1739 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1740 if (!s->saved_cmdlines) {
1741 kfree(s->map_cmdline_to_pid);
1742 return -ENOMEM;
1743 }
1744
1745 s->cmdline_idx = 0;
1746 s->cmdline_num = val;
1747 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1748 sizeof(s->map_pid_to_cmdline));
1749 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1750 val * sizeof(*s->map_cmdline_to_pid));
1751
1752 return 0;
1753}
1754
1755static int trace_create_savedcmd(void)
1756{
1757 int ret;
1758
a6af8fbf 1759 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1760 if (!savedcmd)
1761 return -ENOMEM;
1762
1763 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1764 if (ret < 0) {
1765 kfree(savedcmd);
1766 savedcmd = NULL;
1767 return -ENOMEM;
1768 }
1769
1770 return 0;
bc0c38d1
SR
1771}
1772
b5130b1e
CE
1773int is_tracing_stopped(void)
1774{
2b6080f2 1775 return global_trace.stop_count;
b5130b1e
CE
1776}
1777
0f048701
SR
1778/**
1779 * tracing_start - quick start of the tracer
1780 *
1781 * If tracing is enabled but was stopped by tracing_stop,
1782 * this will start the tracer back up.
1783 */
1784void tracing_start(void)
1785{
1786 struct ring_buffer *buffer;
1787 unsigned long flags;
1788
1789 if (tracing_disabled)
1790 return;
1791
2b6080f2
SR
1792 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1793 if (--global_trace.stop_count) {
1794 if (global_trace.stop_count < 0) {
b06a8301
SR
1795 /* Someone screwed up their debugging */
1796 WARN_ON_ONCE(1);
2b6080f2 1797 global_trace.stop_count = 0;
b06a8301 1798 }
0f048701
SR
1799 goto out;
1800 }
1801
a2f80714 1802 /* Prevent the buffers from switching */
0b9b12c1 1803 arch_spin_lock(&global_trace.max_lock);
0f048701 1804
12883efb 1805 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1806 if (buffer)
1807 ring_buffer_record_enable(buffer);
1808
12883efb
SRRH
1809#ifdef CONFIG_TRACER_MAX_TRACE
1810 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1811 if (buffer)
1812 ring_buffer_record_enable(buffer);
12883efb 1813#endif
0f048701 1814
0b9b12c1 1815 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1816
0f048701 1817 out:
2b6080f2
SR
1818 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1819}
1820
1821static void tracing_start_tr(struct trace_array *tr)
1822{
1823 struct ring_buffer *buffer;
1824 unsigned long flags;
1825
1826 if (tracing_disabled)
1827 return;
1828
1829 /* If global, we need to also start the max tracer */
1830 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1831 return tracing_start();
1832
1833 raw_spin_lock_irqsave(&tr->start_lock, flags);
1834
1835 if (--tr->stop_count) {
1836 if (tr->stop_count < 0) {
1837 /* Someone screwed up their debugging */
1838 WARN_ON_ONCE(1);
1839 tr->stop_count = 0;
1840 }
1841 goto out;
1842 }
1843
12883efb 1844 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1845 if (buffer)
1846 ring_buffer_record_enable(buffer);
1847
1848 out:
1849 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1850}
1851
1852/**
1853 * tracing_stop - quick stop of the tracer
1854 *
1855 * Light weight way to stop tracing. Use in conjunction with
1856 * tracing_start.
1857 */
1858void tracing_stop(void)
1859{
1860 struct ring_buffer *buffer;
1861 unsigned long flags;
1862
2b6080f2
SR
1863 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1864 if (global_trace.stop_count++)
0f048701
SR
1865 goto out;
1866
a2f80714 1867 /* Prevent the buffers from switching */
0b9b12c1 1868 arch_spin_lock(&global_trace.max_lock);
a2f80714 1869
12883efb 1870 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1871 if (buffer)
1872 ring_buffer_record_disable(buffer);
1873
12883efb
SRRH
1874#ifdef CONFIG_TRACER_MAX_TRACE
1875 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1876 if (buffer)
1877 ring_buffer_record_disable(buffer);
12883efb 1878#endif
0f048701 1879
0b9b12c1 1880 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1881
0f048701 1882 out:
2b6080f2
SR
1883 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1884}
1885
1886static void tracing_stop_tr(struct trace_array *tr)
1887{
1888 struct ring_buffer *buffer;
1889 unsigned long flags;
1890
1891 /* If global, we need to also stop the max tracer */
1892 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1893 return tracing_stop();
1894
1895 raw_spin_lock_irqsave(&tr->start_lock, flags);
1896 if (tr->stop_count++)
1897 goto out;
1898
12883efb 1899 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1900 if (buffer)
1901 ring_buffer_record_disable(buffer);
1902
1903 out:
1904 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1905}
1906
e309b41d 1907void trace_stop_cmdline_recording(void);
bc0c38d1 1908
379cfdac 1909static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1910{
a635cf04 1911 unsigned pid, idx;
bc0c38d1
SR
1912
1913 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1914 return 0;
bc0c38d1
SR
1915
1916 /*
1917 * It's not the end of the world if we don't get
1918 * the lock, but we also don't want to spin
1919 * nor do we want to disable interrupts,
1920 * so if we miss here, then better luck next time.
1921 */
0199c4e6 1922 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1923 return 0;
bc0c38d1 1924
939c7a4f 1925 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1926 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1927 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1928
a635cf04
CE
1929 /*
1930 * Check whether the cmdline buffer at idx has a pid
1931 * mapped. We are going to overwrite that entry so we
1932 * need to clear the map_pid_to_cmdline. Otherwise we
1933 * would read the new comm for the old pid.
1934 */
939c7a4f 1935 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1936 if (pid != NO_CMDLINE_MAP)
939c7a4f 1937 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1938
939c7a4f
YY
1939 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1940 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1941
939c7a4f 1942 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1943 }
1944
939c7a4f 1945 set_cmdline(idx, tsk->comm);
bc0c38d1 1946
0199c4e6 1947 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1948
1949 return 1;
bc0c38d1
SR
1950}
1951
4c27e756 1952static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1953{
bc0c38d1
SR
1954 unsigned map;
1955
4ca53085
SR
1956 if (!pid) {
1957 strcpy(comm, "<idle>");
1958 return;
1959 }
bc0c38d1 1960
74bf4076
SR
1961 if (WARN_ON_ONCE(pid < 0)) {
1962 strcpy(comm, "<XXX>");
1963 return;
1964 }
1965
4ca53085
SR
1966 if (pid > PID_MAX_DEFAULT) {
1967 strcpy(comm, "<...>");
1968 return;
1969 }
bc0c38d1 1970
939c7a4f 1971 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1972 if (map != NO_CMDLINE_MAP)
939c7a4f 1973 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1974 else
1975 strcpy(comm, "<...>");
4c27e756
SRRH
1976}
1977
1978void trace_find_cmdline(int pid, char comm[])
1979{
1980 preempt_disable();
1981 arch_spin_lock(&trace_cmdline_lock);
1982
1983 __trace_find_cmdline(pid, comm);
bc0c38d1 1984
0199c4e6 1985 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1986 preempt_enable();
bc0c38d1
SR
1987}
1988
e309b41d 1989void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1990{
0fb9656d 1991 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1992 return;
1993
7ffbd48d
SR
1994 if (!__this_cpu_read(trace_cmdline_save))
1995 return;
1996
379cfdac
SRRH
1997 if (trace_save_cmdline(tsk))
1998 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1999}
2000
af0009fc
SRV
2001/*
2002 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2003 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2004 * simplifies those functions and keeps them in sync.
2005 */
2006enum print_line_t trace_handle_return(struct trace_seq *s)
2007{
2008 return trace_seq_has_overflowed(s) ?
2009 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2010}
2011EXPORT_SYMBOL_GPL(trace_handle_return);
2012
45dcd8b8 2013void
38697053
SR
2014tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2015 int pc)
bc0c38d1
SR
2016{
2017 struct task_struct *tsk = current;
bc0c38d1 2018
777e208d
SR
2019 entry->preempt_count = pc & 0xff;
2020 entry->pid = (tsk) ? tsk->pid : 0;
2021 entry->flags =
9244489a 2022#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 2023 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
2024#else
2025 TRACE_FLAG_IRQS_NOSUPPORT |
2026#endif
7e6867bf 2027 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
bc0c38d1 2028 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
c59f29cb 2029 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
2030 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2031 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 2032}
f413cdb8 2033EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 2034
e77405ad
SR
2035struct ring_buffer_event *
2036trace_buffer_lock_reserve(struct ring_buffer *buffer,
2037 int type,
2038 unsigned long len,
2039 unsigned long flags, int pc)
51a763dd 2040{
3e9a8aad 2041 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
0fc1b09f
SRRH
2042}
2043
2044DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2045DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2046static int trace_buffered_event_ref;
2047
2048/**
2049 * trace_buffered_event_enable - enable buffering events
2050 *
2051 * When events are being filtered, it is quicker to use a temporary
2052 * buffer to write the event data into if there's a likely chance
2053 * that it will not be committed. The discard of the ring buffer
2054 * is not as fast as committing, and is much slower than copying
2055 * a commit.
2056 *
2057 * When an event is to be filtered, allocate per cpu buffers to
2058 * write the event data into, and if the event is filtered and discarded
2059 * it is simply dropped, otherwise, the entire data is to be committed
2060 * in one shot.
2061 */
2062void trace_buffered_event_enable(void)
2063{
2064 struct ring_buffer_event *event;
2065 struct page *page;
2066 int cpu;
51a763dd 2067
0fc1b09f
SRRH
2068 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2069
2070 if (trace_buffered_event_ref++)
2071 return;
2072
2073 for_each_tracing_cpu(cpu) {
2074 page = alloc_pages_node(cpu_to_node(cpu),
2075 GFP_KERNEL | __GFP_NORETRY, 0);
2076 if (!page)
2077 goto failed;
2078
2079 event = page_address(page);
2080 memset(event, 0, sizeof(*event));
2081
2082 per_cpu(trace_buffered_event, cpu) = event;
2083
2084 preempt_disable();
2085 if (cpu == smp_processor_id() &&
2086 this_cpu_read(trace_buffered_event) !=
2087 per_cpu(trace_buffered_event, cpu))
2088 WARN_ON_ONCE(1);
2089 preempt_enable();
51a763dd
ACM
2090 }
2091
0fc1b09f
SRRH
2092 return;
2093 failed:
2094 trace_buffered_event_disable();
2095}
2096
2097static void enable_trace_buffered_event(void *data)
2098{
2099 /* Probably not needed, but do it anyway */
2100 smp_rmb();
2101 this_cpu_dec(trace_buffered_event_cnt);
2102}
2103
2104static void disable_trace_buffered_event(void *data)
2105{
2106 this_cpu_inc(trace_buffered_event_cnt);
2107}
2108
2109/**
2110 * trace_buffered_event_disable - disable buffering events
2111 *
2112 * When a filter is removed, it is faster to not use the buffered
2113 * events, and to commit directly into the ring buffer. Free up
2114 * the temp buffers when there are no more users. This requires
2115 * special synchronization with current events.
2116 */
2117void trace_buffered_event_disable(void)
2118{
2119 int cpu;
2120
2121 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2122
2123 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2124 return;
2125
2126 if (--trace_buffered_event_ref)
2127 return;
2128
2129 preempt_disable();
2130 /* For each CPU, set the buffer as used. */
2131 smp_call_function_many(tracing_buffer_mask,
2132 disable_trace_buffered_event, NULL, 1);
2133 preempt_enable();
2134
2135 /* Wait for all current users to finish */
2136 synchronize_sched();
2137
2138 for_each_tracing_cpu(cpu) {
2139 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2140 per_cpu(trace_buffered_event, cpu) = NULL;
2141 }
2142 /*
2143 * Make sure trace_buffered_event is NULL before clearing
2144 * trace_buffered_event_cnt.
2145 */
2146 smp_wmb();
2147
2148 preempt_disable();
2149 /* Do the work on each cpu */
2150 smp_call_function_many(tracing_buffer_mask,
2151 enable_trace_buffered_event, NULL, 1);
2152 preempt_enable();
51a763dd 2153}
51a763dd 2154
2c4a33ab
SRRH
2155static struct ring_buffer *temp_buffer;
2156
ccb469a1
SR
2157struct ring_buffer_event *
2158trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
7f1d2f82 2159 struct trace_event_file *trace_file,
ccb469a1
SR
2160 int type, unsigned long len,
2161 unsigned long flags, int pc)
2162{
2c4a33ab 2163 struct ring_buffer_event *entry;
0fc1b09f 2164 int val;
2c4a33ab 2165
7f1d2f82 2166 *current_rb = trace_file->tr->trace_buffer.buffer;
0fc1b09f
SRRH
2167
2168 if ((trace_file->flags &
2169 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2170 (entry = this_cpu_read(trace_buffered_event))) {
2171 /* Try to use the per cpu buffer first */
2172 val = this_cpu_inc_return(trace_buffered_event_cnt);
2173 if (val == 1) {
2174 trace_event_setup(entry, type, flags, pc);
2175 entry->array[0] = len;
2176 return entry;
2177 }
2178 this_cpu_dec(trace_buffered_event_cnt);
2179 }
2180
3e9a8aad
SRRH
2181 entry = __trace_buffer_lock_reserve(*current_rb,
2182 type, len, flags, pc);
2c4a33ab
SRRH
2183 /*
2184 * If tracing is off, but we have triggers enabled
2185 * we still need to look at the event data. Use the temp_buffer
2186 * to store the trace event for the tigger to use. It's recusive
2187 * safe and will not be recorded anywhere.
2188 */
5d6ad960 2189 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2c4a33ab 2190 *current_rb = temp_buffer;
3e9a8aad
SRRH
2191 entry = __trace_buffer_lock_reserve(*current_rb,
2192 type, len, flags, pc);
2c4a33ab
SRRH
2193 }
2194 return entry;
ccb469a1
SR
2195}
2196EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2197
42391745
SRRH
2198static DEFINE_SPINLOCK(tracepoint_iter_lock);
2199static DEFINE_MUTEX(tracepoint_printk_mutex);
2200
2201static void output_printk(struct trace_event_buffer *fbuffer)
2202{
2203 struct trace_event_call *event_call;
2204 struct trace_event *event;
2205 unsigned long flags;
2206 struct trace_iterator *iter = tracepoint_print_iter;
2207
2208 /* We should never get here if iter is NULL */
2209 if (WARN_ON_ONCE(!iter))
2210 return;
2211
2212 event_call = fbuffer->trace_file->event_call;
2213 if (!event_call || !event_call->event.funcs ||
2214 !event_call->event.funcs->trace)
2215 return;
2216
2217 event = &fbuffer->trace_file->event_call->event;
2218
2219 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2220 trace_seq_init(&iter->seq);
2221 iter->ent = fbuffer->entry;
2222 event_call->event.funcs->trace(iter, 0, event);
2223 trace_seq_putc(&iter->seq, 0);
2224 printk("%s", iter->seq.buffer);
2225
2226 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2227}
2228
2229int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2230 void __user *buffer, size_t *lenp,
2231 loff_t *ppos)
2232{
2233 int save_tracepoint_printk;
2234 int ret;
2235
2236 mutex_lock(&tracepoint_printk_mutex);
2237 save_tracepoint_printk = tracepoint_printk;
2238
2239 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2240
2241 /*
2242 * This will force exiting early, as tracepoint_printk
2243 * is always zero when tracepoint_printk_iter is not allocated
2244 */
2245 if (!tracepoint_print_iter)
2246 tracepoint_printk = 0;
2247
2248 if (save_tracepoint_printk == tracepoint_printk)
2249 goto out;
2250
2251 if (tracepoint_printk)
2252 static_key_enable(&tracepoint_printk_key.key);
2253 else
2254 static_key_disable(&tracepoint_printk_key.key);
2255
2256 out:
2257 mutex_unlock(&tracepoint_printk_mutex);
2258
2259 return ret;
2260}
2261
2262void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2263{
2264 if (static_key_false(&tracepoint_printk_key.key))
2265 output_printk(fbuffer);
2266
2267 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2268 fbuffer->event, fbuffer->entry,
2269 fbuffer->flags, fbuffer->pc);
2270}
2271EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2272
b7f0c959
SRRH
2273void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2274 struct ring_buffer *buffer,
0d5c6e1c
SR
2275 struct ring_buffer_event *event,
2276 unsigned long flags, int pc,
2277 struct pt_regs *regs)
1fd8df2c 2278{
7ffbd48d 2279 __buffer_unlock_commit(buffer, event);
1fd8df2c 2280
be54f69c
SRRH
2281 /*
2282 * If regs is not set, then skip the following callers:
2283 * trace_buffer_unlock_commit_regs
2284 * event_trigger_unlock_commit
2285 * trace_event_buffer_commit
2286 * trace_event_raw_event_sched_switch
2287 * Note, we can still get here via blktrace, wakeup tracer
2288 * and mmiotrace, but that's ok if they lose a function or
2289 * two. They are that meaningful.
2290 */
2291 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
1fd8df2c
MH
2292 ftrace_trace_userstack(buffer, flags, pc);
2293}
1fd8df2c 2294
52ffabe3
SRRH
2295/*
2296 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2297 */
2298void
2299trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2300 struct ring_buffer_event *event)
2301{
2302 __buffer_unlock_commit(buffer, event);
2303}
2304
478409dd
CZ
2305static void
2306trace_process_export(struct trace_export *export,
2307 struct ring_buffer_event *event)
2308{
2309 struct trace_entry *entry;
2310 unsigned int size = 0;
2311
2312 entry = ring_buffer_event_data(event);
2313 size = ring_buffer_event_length(event);
2314 export->write(entry, size);
2315}
2316
2317static DEFINE_MUTEX(ftrace_export_lock);
2318
2319static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2320
2321static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2322
2323static inline void ftrace_exports_enable(void)
2324{
2325 static_branch_enable(&ftrace_exports_enabled);
2326}
2327
2328static inline void ftrace_exports_disable(void)
2329{
2330 static_branch_disable(&ftrace_exports_enabled);
2331}
2332
2333void ftrace_exports(struct ring_buffer_event *event)
2334{
2335 struct trace_export *export;
2336
2337 preempt_disable_notrace();
2338
2339 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2340 while (export) {
2341 trace_process_export(export, event);
2342 export = rcu_dereference_raw_notrace(export->next);
2343 }
2344
2345 preempt_enable_notrace();
2346}
2347
2348static inline void
2349add_trace_export(struct trace_export **list, struct trace_export *export)
2350{
2351 rcu_assign_pointer(export->next, *list);
2352 /*
2353 * We are entering export into the list but another
2354 * CPU might be walking that list. We need to make sure
2355 * the export->next pointer is valid before another CPU sees
2356 * the export pointer included into the list.
2357 */
2358 rcu_assign_pointer(*list, export);
2359}
2360
2361static inline int
2362rm_trace_export(struct trace_export **list, struct trace_export *export)
2363{
2364 struct trace_export **p;
2365
2366 for (p = list; *p != NULL; p = &(*p)->next)
2367 if (*p == export)
2368 break;
2369
2370 if (*p != export)
2371 return -1;
2372
2373 rcu_assign_pointer(*p, (*p)->next);
2374
2375 return 0;
2376}
2377
2378static inline void
2379add_ftrace_export(struct trace_export **list, struct trace_export *export)
2380{
2381 if (*list == NULL)
2382 ftrace_exports_enable();
2383
2384 add_trace_export(list, export);
2385}
2386
2387static inline int
2388rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2389{
2390 int ret;
2391
2392 ret = rm_trace_export(list, export);
2393 if (*list == NULL)
2394 ftrace_exports_disable();
2395
2396 return ret;
2397}
2398
2399int register_ftrace_export(struct trace_export *export)
2400{
2401 if (WARN_ON_ONCE(!export->write))
2402 return -1;
2403
2404 mutex_lock(&ftrace_export_lock);
2405
2406 add_ftrace_export(&ftrace_exports_list, export);
2407
2408 mutex_unlock(&ftrace_export_lock);
2409
2410 return 0;
2411}
2412EXPORT_SYMBOL_GPL(register_ftrace_export);
2413
2414int unregister_ftrace_export(struct trace_export *export)
2415{
2416 int ret;
2417
2418 mutex_lock(&ftrace_export_lock);
2419
2420 ret = rm_ftrace_export(&ftrace_exports_list, export);
2421
2422 mutex_unlock(&ftrace_export_lock);
2423
2424 return ret;
2425}
2426EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2427
e309b41d 2428void
7be42151 2429trace_function(struct trace_array *tr,
38697053
SR
2430 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2431 int pc)
bc0c38d1 2432{
2425bcb9 2433 struct trace_event_call *call = &event_function;
12883efb 2434 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 2435 struct ring_buffer_event *event;
777e208d 2436 struct ftrace_entry *entry;
bc0c38d1 2437
3e9a8aad
SRRH
2438 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2439 flags, pc);
3928a8a2
SR
2440 if (!event)
2441 return;
2442 entry = ring_buffer_event_data(event);
777e208d
SR
2443 entry->ip = ip;
2444 entry->parent_ip = parent_ip;
e1112b4d 2445
478409dd
CZ
2446 if (!call_filter_check_discard(call, entry, buffer, event)) {
2447 if (static_branch_unlikely(&ftrace_exports_enabled))
2448 ftrace_exports(event);
7ffbd48d 2449 __buffer_unlock_commit(buffer, event);
478409dd 2450 }
bc0c38d1
SR
2451}
2452
c0a0d0d3 2453#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
2454
2455#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2456struct ftrace_stack {
2457 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2458};
2459
2460static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2461static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2462
e77405ad 2463static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 2464 unsigned long flags,
1fd8df2c 2465 int skip, int pc, struct pt_regs *regs)
86387f7e 2466{
2425bcb9 2467 struct trace_event_call *call = &event_kernel_stack;
3928a8a2 2468 struct ring_buffer_event *event;
777e208d 2469 struct stack_entry *entry;
86387f7e 2470 struct stack_trace trace;
4a9bd3f1
SR
2471 int use_stack;
2472 int size = FTRACE_STACK_ENTRIES;
2473
2474 trace.nr_entries = 0;
2475 trace.skip = skip;
2476
be54f69c
SRRH
2477 /*
2478 * Add two, for this function and the call to save_stack_trace()
2479 * If regs is set, then these functions will not be in the way.
2480 */
2481 if (!regs)
2482 trace.skip += 2;
2483
4a9bd3f1
SR
2484 /*
2485 * Since events can happen in NMIs there's no safe way to
2486 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2487 * or NMI comes in, it will just have to use the default
2488 * FTRACE_STACK_SIZE.
2489 */
2490 preempt_disable_notrace();
2491
82146529 2492 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
2493 /*
2494 * We don't need any atomic variables, just a barrier.
2495 * If an interrupt comes in, we don't care, because it would
2496 * have exited and put the counter back to what we want.
2497 * We just need a barrier to keep gcc from moving things
2498 * around.
2499 */
2500 barrier();
2501 if (use_stack == 1) {
bdffd893 2502 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
2503 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2504
2505 if (regs)
2506 save_stack_trace_regs(regs, &trace);
2507 else
2508 save_stack_trace(&trace);
2509
2510 if (trace.nr_entries > size)
2511 size = trace.nr_entries;
2512 } else
2513 /* From now on, use_stack is a boolean */
2514 use_stack = 0;
2515
2516 size *= sizeof(unsigned long);
86387f7e 2517
3e9a8aad
SRRH
2518 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2519 sizeof(*entry) + size, flags, pc);
3928a8a2 2520 if (!event)
4a9bd3f1
SR
2521 goto out;
2522 entry = ring_buffer_event_data(event);
86387f7e 2523
4a9bd3f1
SR
2524 memset(&entry->caller, 0, size);
2525
2526 if (use_stack)
2527 memcpy(&entry->caller, trace.entries,
2528 trace.nr_entries * sizeof(unsigned long));
2529 else {
2530 trace.max_entries = FTRACE_STACK_ENTRIES;
2531 trace.entries = entry->caller;
2532 if (regs)
2533 save_stack_trace_regs(regs, &trace);
2534 else
2535 save_stack_trace(&trace);
2536 }
2537
2538 entry->size = trace.nr_entries;
86387f7e 2539
f306cc82 2540 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2541 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
2542
2543 out:
2544 /* Again, don't let gcc optimize things here */
2545 barrier();
82146529 2546 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
2547 preempt_enable_notrace();
2548
f0a920d5
IM
2549}
2550
2d34f489
SRRH
2551static inline void ftrace_trace_stack(struct trace_array *tr,
2552 struct ring_buffer *buffer,
73dddbb5
SRRH
2553 unsigned long flags,
2554 int skip, int pc, struct pt_regs *regs)
53614991 2555{
2d34f489 2556 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
53614991
SR
2557 return;
2558
73dddbb5 2559 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
53614991
SR
2560}
2561
c0a0d0d3
FW
2562void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2563 int pc)
38697053 2564{
12883efb 2565 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
2566}
2567
03889384
SR
2568/**
2569 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 2570 * @skip: Number of functions to skip (helper handlers)
03889384 2571 */
c142be8e 2572void trace_dump_stack(int skip)
03889384
SR
2573{
2574 unsigned long flags;
2575
2576 if (tracing_disabled || tracing_selftest_running)
e36c5458 2577 return;
03889384
SR
2578
2579 local_save_flags(flags);
2580
c142be8e
SRRH
2581 /*
2582 * Skip 3 more, seems to get us at the caller of
2583 * this function.
2584 */
2585 skip += 3;
2586 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2587 flags, skip, preempt_count(), NULL);
03889384
SR
2588}
2589
91e86e56
SR
2590static DEFINE_PER_CPU(int, user_stack_count);
2591
e77405ad
SR
2592void
2593ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 2594{
2425bcb9 2595 struct trace_event_call *call = &event_user_stack;
8d7c6a96 2596 struct ring_buffer_event *event;
02b67518
TE
2597 struct userstack_entry *entry;
2598 struct stack_trace trace;
02b67518 2599
983f938a 2600 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
02b67518
TE
2601 return;
2602
b6345879
SR
2603 /*
2604 * NMIs can not handle page faults, even with fix ups.
2605 * The save user stack can (and often does) fault.
2606 */
2607 if (unlikely(in_nmi()))
2608 return;
02b67518 2609
91e86e56
SR
2610 /*
2611 * prevent recursion, since the user stack tracing may
2612 * trigger other kernel events.
2613 */
2614 preempt_disable();
2615 if (__this_cpu_read(user_stack_count))
2616 goto out;
2617
2618 __this_cpu_inc(user_stack_count);
2619
3e9a8aad
SRRH
2620 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2621 sizeof(*entry), flags, pc);
02b67518 2622 if (!event)
1dbd1951 2623 goto out_drop_count;
02b67518 2624 entry = ring_buffer_event_data(event);
02b67518 2625
48659d31 2626 entry->tgid = current->tgid;
02b67518
TE
2627 memset(&entry->caller, 0, sizeof(entry->caller));
2628
2629 trace.nr_entries = 0;
2630 trace.max_entries = FTRACE_STACK_ENTRIES;
2631 trace.skip = 0;
2632 trace.entries = entry->caller;
2633
2634 save_stack_trace_user(&trace);
f306cc82 2635 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 2636 __buffer_unlock_commit(buffer, event);
91e86e56 2637
1dbd1951 2638 out_drop_count:
91e86e56 2639 __this_cpu_dec(user_stack_count);
91e86e56
SR
2640 out:
2641 preempt_enable();
02b67518
TE
2642}
2643
4fd27358
HE
2644#ifdef UNUSED
2645static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 2646{
7be42151 2647 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 2648}
4fd27358 2649#endif /* UNUSED */
02b67518 2650
c0a0d0d3
FW
2651#endif /* CONFIG_STACKTRACE */
2652
07d777fe
SR
2653/* created for use with alloc_percpu */
2654struct trace_buffer_struct {
e2ace001
AL
2655 int nesting;
2656 char buffer[4][TRACE_BUF_SIZE];
07d777fe
SR
2657};
2658
2659static struct trace_buffer_struct *trace_percpu_buffer;
07d777fe
SR
2660
2661/*
e2ace001
AL
2662 * Thise allows for lockless recording. If we're nested too deeply, then
2663 * this returns NULL.
07d777fe
SR
2664 */
2665static char *get_trace_buf(void)
2666{
e2ace001 2667 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
07d777fe 2668
e2ace001 2669 if (!buffer || buffer->nesting >= 4)
07d777fe
SR
2670 return NULL;
2671
e2ace001
AL
2672 return &buffer->buffer[buffer->nesting++][0];
2673}
2674
2675static void put_trace_buf(void)
2676{
2677 this_cpu_dec(trace_percpu_buffer->nesting);
07d777fe
SR
2678}
2679
2680static int alloc_percpu_trace_buffer(void)
2681{
2682 struct trace_buffer_struct *buffers;
07d777fe
SR
2683
2684 buffers = alloc_percpu(struct trace_buffer_struct);
e2ace001
AL
2685 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2686 return -ENOMEM;
07d777fe
SR
2687
2688 trace_percpu_buffer = buffers;
07d777fe 2689 return 0;
07d777fe
SR
2690}
2691
81698831
SR
2692static int buffers_allocated;
2693
07d777fe
SR
2694void trace_printk_init_buffers(void)
2695{
07d777fe
SR
2696 if (buffers_allocated)
2697 return;
2698
2699 if (alloc_percpu_trace_buffer())
2700 return;
2701
2184db46
SR
2702 /* trace_printk() is for debug use only. Don't use it in production. */
2703
a395d6a7
JP
2704 pr_warn("\n");
2705 pr_warn("**********************************************************\n");
2706 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2707 pr_warn("** **\n");
2708 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2709 pr_warn("** **\n");
2710 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2711 pr_warn("** unsafe for production use. **\n");
2712 pr_warn("** **\n");
2713 pr_warn("** If you see this message and you are not debugging **\n");
2714 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2715 pr_warn("** **\n");
2716 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2717 pr_warn("**********************************************************\n");
07d777fe 2718
b382ede6
SR
2719 /* Expand the buffers to set size */
2720 tracing_update_buffers();
2721
07d777fe 2722 buffers_allocated = 1;
81698831
SR
2723
2724 /*
2725 * trace_printk_init_buffers() can be called by modules.
2726 * If that happens, then we need to start cmdline recording
2727 * directly here. If the global_trace.buffer is already
2728 * allocated here, then this was called by module code.
2729 */
12883efb 2730 if (global_trace.trace_buffer.buffer)
81698831
SR
2731 tracing_start_cmdline_record();
2732}
2733
2734void trace_printk_start_comm(void)
2735{
2736 /* Start tracing comms if trace printk is set */
2737 if (!buffers_allocated)
2738 return;
2739 tracing_start_cmdline_record();
2740}
2741
2742static void trace_printk_start_stop_comm(int enabled)
2743{
2744 if (!buffers_allocated)
2745 return;
2746
2747 if (enabled)
2748 tracing_start_cmdline_record();
2749 else
2750 tracing_stop_cmdline_record();
07d777fe
SR
2751}
2752
769b0441 2753/**
48ead020 2754 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2755 *
2756 */
40ce74f1 2757int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2758{
2425bcb9 2759 struct trace_event_call *call = &event_bprint;
769b0441 2760 struct ring_buffer_event *event;
e77405ad 2761 struct ring_buffer *buffer;
769b0441 2762 struct trace_array *tr = &global_trace;
48ead020 2763 struct bprint_entry *entry;
769b0441 2764 unsigned long flags;
07d777fe
SR
2765 char *tbuffer;
2766 int len = 0, size, pc;
769b0441
FW
2767
2768 if (unlikely(tracing_selftest_running || tracing_disabled))
2769 return 0;
2770
2771 /* Don't pollute graph traces with trace_vprintk internals */
2772 pause_graph_tracing();
2773
2774 pc = preempt_count();
5168ae50 2775 preempt_disable_notrace();
769b0441 2776
07d777fe
SR
2777 tbuffer = get_trace_buf();
2778 if (!tbuffer) {
2779 len = 0;
e2ace001 2780 goto out_nobuffer;
07d777fe 2781 }
769b0441 2782
07d777fe 2783 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2784
07d777fe
SR
2785 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2786 goto out;
769b0441 2787
07d777fe 2788 local_save_flags(flags);
769b0441 2789 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2790 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
2791 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2792 flags, pc);
769b0441 2793 if (!event)
07d777fe 2794 goto out;
769b0441
FW
2795 entry = ring_buffer_event_data(event);
2796 entry->ip = ip;
769b0441
FW
2797 entry->fmt = fmt;
2798
07d777fe 2799 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2800 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2801 __buffer_unlock_commit(buffer, event);
2d34f489 2802 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
d931369b 2803 }
769b0441 2804
769b0441 2805out:
e2ace001
AL
2806 put_trace_buf();
2807
2808out_nobuffer:
5168ae50 2809 preempt_enable_notrace();
769b0441
FW
2810 unpause_graph_tracing();
2811
2812 return len;
2813}
48ead020
FW
2814EXPORT_SYMBOL_GPL(trace_vbprintk);
2815
12883efb
SRRH
2816static int
2817__trace_array_vprintk(struct ring_buffer *buffer,
2818 unsigned long ip, const char *fmt, va_list args)
48ead020 2819{
2425bcb9 2820 struct trace_event_call *call = &event_print;
48ead020 2821 struct ring_buffer_event *event;
07d777fe 2822 int len = 0, size, pc;
48ead020 2823 struct print_entry *entry;
07d777fe
SR
2824 unsigned long flags;
2825 char *tbuffer;
48ead020
FW
2826
2827 if (tracing_disabled || tracing_selftest_running)
2828 return 0;
2829
07d777fe
SR
2830 /* Don't pollute graph traces with trace_vprintk internals */
2831 pause_graph_tracing();
2832
48ead020
FW
2833 pc = preempt_count();
2834 preempt_disable_notrace();
48ead020 2835
07d777fe
SR
2836
2837 tbuffer = get_trace_buf();
2838 if (!tbuffer) {
2839 len = 0;
e2ace001 2840 goto out_nobuffer;
07d777fe 2841 }
48ead020 2842
3558a5ac 2843 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2844
07d777fe 2845 local_save_flags(flags);
48ead020 2846 size = sizeof(*entry) + len + 1;
3e9a8aad
SRRH
2847 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2848 flags, pc);
48ead020 2849 if (!event)
07d777fe 2850 goto out;
48ead020 2851 entry = ring_buffer_event_data(event);
c13d2f7c 2852 entry->ip = ip;
48ead020 2853
3558a5ac 2854 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2855 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2856 __buffer_unlock_commit(buffer, event);
2d34f489 2857 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
d931369b 2858 }
e2ace001
AL
2859
2860out:
2861 put_trace_buf();
2862
2863out_nobuffer:
48ead020 2864 preempt_enable_notrace();
07d777fe 2865 unpause_graph_tracing();
48ead020
FW
2866
2867 return len;
2868}
659372d3 2869
12883efb
SRRH
2870int trace_array_vprintk(struct trace_array *tr,
2871 unsigned long ip, const char *fmt, va_list args)
2872{
2873 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2874}
2875
2876int trace_array_printk(struct trace_array *tr,
2877 unsigned long ip, const char *fmt, ...)
2878{
2879 int ret;
2880 va_list ap;
2881
983f938a 2882 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2883 return 0;
2884
2885 va_start(ap, fmt);
2886 ret = trace_array_vprintk(tr, ip, fmt, ap);
2887 va_end(ap);
2888 return ret;
2889}
2890
2891int trace_array_printk_buf(struct ring_buffer *buffer,
2892 unsigned long ip, const char *fmt, ...)
2893{
2894 int ret;
2895 va_list ap;
2896
983f938a 2897 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
12883efb
SRRH
2898 return 0;
2899
2900 va_start(ap, fmt);
2901 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2902 va_end(ap);
2903 return ret;
2904}
2905
659372d3
SR
2906int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2907{
a813a159 2908 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2909}
769b0441
FW
2910EXPORT_SYMBOL_GPL(trace_vprintk);
2911
e2ac8ef5 2912static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2913{
6d158a81
SR
2914 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2915
5a90f577 2916 iter->idx++;
6d158a81
SR
2917 if (buf_iter)
2918 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2919}
2920
e309b41d 2921static struct trace_entry *
bc21b478
SR
2922peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2923 unsigned long *lost_events)
dd0e545f 2924{
3928a8a2 2925 struct ring_buffer_event *event;
6d158a81 2926 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2927
d769041f
SR
2928 if (buf_iter)
2929 event = ring_buffer_iter_peek(buf_iter, ts);
2930 else
12883efb 2931 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2932 lost_events);
d769041f 2933
4a9bd3f1
SR
2934 if (event) {
2935 iter->ent_size = ring_buffer_event_length(event);
2936 return ring_buffer_event_data(event);
2937 }
2938 iter->ent_size = 0;
2939 return NULL;
dd0e545f 2940}
d769041f 2941
dd0e545f 2942static struct trace_entry *
bc21b478
SR
2943__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2944 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2945{
12883efb 2946 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2947 struct trace_entry *ent, *next = NULL;
aa27497c 2948 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2949 int cpu_file = iter->cpu_file;
3928a8a2 2950 u64 next_ts = 0, ts;
bc0c38d1 2951 int next_cpu = -1;
12b5da34 2952 int next_size = 0;
bc0c38d1
SR
2953 int cpu;
2954
b04cc6b1
FW
2955 /*
2956 * If we are in a per_cpu trace file, don't bother by iterating over
2957 * all cpu and peek directly.
2958 */
ae3b5093 2959 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2960 if (ring_buffer_empty_cpu(buffer, cpu_file))
2961 return NULL;
bc21b478 2962 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2963 if (ent_cpu)
2964 *ent_cpu = cpu_file;
2965
2966 return ent;
2967 }
2968
ab46428c 2969 for_each_tracing_cpu(cpu) {
dd0e545f 2970
3928a8a2
SR
2971 if (ring_buffer_empty_cpu(buffer, cpu))
2972 continue;
dd0e545f 2973
bc21b478 2974 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2975
cdd31cd2
IM
2976 /*
2977 * Pick the entry with the smallest timestamp:
2978 */
3928a8a2 2979 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2980 next = ent;
2981 next_cpu = cpu;
3928a8a2 2982 next_ts = ts;
bc21b478 2983 next_lost = lost_events;
12b5da34 2984 next_size = iter->ent_size;
bc0c38d1
SR
2985 }
2986 }
2987
12b5da34
SR
2988 iter->ent_size = next_size;
2989
bc0c38d1
SR
2990 if (ent_cpu)
2991 *ent_cpu = next_cpu;
2992
3928a8a2
SR
2993 if (ent_ts)
2994 *ent_ts = next_ts;
2995
bc21b478
SR
2996 if (missing_events)
2997 *missing_events = next_lost;
2998
bc0c38d1
SR
2999 return next;
3000}
3001
dd0e545f 3002/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
3003struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3004 int *ent_cpu, u64 *ent_ts)
bc0c38d1 3005{
bc21b478 3006 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
3007}
3008
3009/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 3010void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 3011{
bc21b478
SR
3012 iter->ent = __find_next_entry(iter, &iter->cpu,
3013 &iter->lost_events, &iter->ts);
dd0e545f 3014
3928a8a2 3015 if (iter->ent)
e2ac8ef5 3016 trace_iterator_increment(iter);
dd0e545f 3017
3928a8a2 3018 return iter->ent ? iter : NULL;
b3806b43 3019}
bc0c38d1 3020
e309b41d 3021static void trace_consume(struct trace_iterator *iter)
b3806b43 3022{
12883efb 3023 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 3024 &iter->lost_events);
bc0c38d1
SR
3025}
3026
e309b41d 3027static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
3028{
3029 struct trace_iterator *iter = m->private;
bc0c38d1 3030 int i = (int)*pos;
4e3c3333 3031 void *ent;
bc0c38d1 3032
a63ce5b3
SR
3033 WARN_ON_ONCE(iter->leftover);
3034
bc0c38d1
SR
3035 (*pos)++;
3036
3037 /* can't go backwards */
3038 if (iter->idx > i)
3039 return NULL;
3040
3041 if (iter->idx < 0)
955b61e5 3042 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3043 else
3044 ent = iter;
3045
3046 while (ent && iter->idx < i)
955b61e5 3047 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
3048
3049 iter->pos = *pos;
3050
bc0c38d1
SR
3051 return ent;
3052}
3053
955b61e5 3054void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 3055{
2f26ebd5
SR
3056 struct ring_buffer_event *event;
3057 struct ring_buffer_iter *buf_iter;
3058 unsigned long entries = 0;
3059 u64 ts;
3060
12883efb 3061 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 3062
6d158a81
SR
3063 buf_iter = trace_buffer_iter(iter, cpu);
3064 if (!buf_iter)
2f26ebd5
SR
3065 return;
3066
2f26ebd5
SR
3067 ring_buffer_iter_reset(buf_iter);
3068
3069 /*
3070 * We could have the case with the max latency tracers
3071 * that a reset never took place on a cpu. This is evident
3072 * by the timestamp being before the start of the buffer.
3073 */
3074 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 3075 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
3076 break;
3077 entries++;
3078 ring_buffer_read(buf_iter, NULL);
3079 }
3080
12883efb 3081 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
3082}
3083
d7350c3f 3084/*
d7350c3f
FW
3085 * The current tracer is copied to avoid a global locking
3086 * all around.
3087 */
bc0c38d1
SR
3088static void *s_start(struct seq_file *m, loff_t *pos)
3089{
3090 struct trace_iterator *iter = m->private;
2b6080f2 3091 struct trace_array *tr = iter->tr;
b04cc6b1 3092 int cpu_file = iter->cpu_file;
bc0c38d1
SR
3093 void *p = NULL;
3094 loff_t l = 0;
3928a8a2 3095 int cpu;
bc0c38d1 3096
2fd196ec
HT
3097 /*
3098 * copy the tracer to avoid using a global lock all around.
3099 * iter->trace is a copy of current_trace, the pointer to the
3100 * name may be used instead of a strcmp(), as iter->trace->name
3101 * will point to the same string as current_trace->name.
3102 */
bc0c38d1 3103 mutex_lock(&trace_types_lock);
2b6080f2
SR
3104 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3105 *iter->trace = *tr->current_trace;
d7350c3f 3106 mutex_unlock(&trace_types_lock);
bc0c38d1 3107
12883efb 3108#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3109 if (iter->snapshot && iter->trace->use_max_tr)
3110 return ERR_PTR(-EBUSY);
12883efb 3111#endif
debdd57f
HT
3112
3113 if (!iter->snapshot)
3114 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 3115
bc0c38d1
SR
3116 if (*pos != iter->pos) {
3117 iter->ent = NULL;
3118 iter->cpu = 0;
3119 iter->idx = -1;
3120
ae3b5093 3121 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3122 for_each_tracing_cpu(cpu)
2f26ebd5 3123 tracing_iter_reset(iter, cpu);
b04cc6b1 3124 } else
2f26ebd5 3125 tracing_iter_reset(iter, cpu_file);
bc0c38d1 3126
ac91d854 3127 iter->leftover = 0;
bc0c38d1
SR
3128 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3129 ;
3130
3131 } else {
a63ce5b3
SR
3132 /*
3133 * If we overflowed the seq_file before, then we want
3134 * to just reuse the trace_seq buffer again.
3135 */
3136 if (iter->leftover)
3137 p = iter;
3138 else {
3139 l = *pos - 1;
3140 p = s_next(m, p, &l);
3141 }
bc0c38d1
SR
3142 }
3143
4f535968 3144 trace_event_read_lock();
7e53bd42 3145 trace_access_lock(cpu_file);
bc0c38d1
SR
3146 return p;
3147}
3148
3149static void s_stop(struct seq_file *m, void *p)
3150{
7e53bd42
LJ
3151 struct trace_iterator *iter = m->private;
3152
12883efb 3153#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
3154 if (iter->snapshot && iter->trace->use_max_tr)
3155 return;
12883efb 3156#endif
debdd57f
HT
3157
3158 if (!iter->snapshot)
3159 atomic_dec(&trace_record_cmdline_disabled);
12883efb 3160
7e53bd42 3161 trace_access_unlock(iter->cpu_file);
4f535968 3162 trace_event_read_unlock();
bc0c38d1
SR
3163}
3164
39eaf7ef 3165static void
12883efb
SRRH
3166get_total_entries(struct trace_buffer *buf,
3167 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
3168{
3169 unsigned long count;
3170 int cpu;
3171
3172 *total = 0;
3173 *entries = 0;
3174
3175 for_each_tracing_cpu(cpu) {
12883efb 3176 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
3177 /*
3178 * If this buffer has skipped entries, then we hold all
3179 * entries for the trace and we need to ignore the
3180 * ones before the time stamp.
3181 */
12883efb
SRRH
3182 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3183 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
3184 /* total is the same as the entries */
3185 *total += count;
3186 } else
3187 *total += count +
12883efb 3188 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
3189 *entries += count;
3190 }
3191}
3192
e309b41d 3193static void print_lat_help_header(struct seq_file *m)
bc0c38d1 3194{
d79ac28f
RV
3195 seq_puts(m, "# _------=> CPU# \n"
3196 "# / _-----=> irqs-off \n"
3197 "# | / _----=> need-resched \n"
3198 "# || / _---=> hardirq/softirq \n"
3199 "# ||| / _--=> preempt-depth \n"
3200 "# |||| / delay \n"
3201 "# cmd pid ||||| time | caller \n"
3202 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
3203}
3204
12883efb 3205static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 3206{
39eaf7ef
SR
3207 unsigned long total;
3208 unsigned long entries;
3209
12883efb 3210 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
3211 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3212 entries, total, num_online_cpus());
3213 seq_puts(m, "#\n");
3214}
3215
12883efb 3216static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 3217{
12883efb 3218 print_event_info(buf, m);
d79ac28f
RV
3219 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
3220 "# | | | | |\n");
bc0c38d1
SR
3221}
3222
12883efb 3223static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 3224{
12883efb 3225 print_event_info(buf, m);
d79ac28f
RV
3226 seq_puts(m, "# _-----=> irqs-off\n"
3227 "# / _----=> need-resched\n"
3228 "# | / _---=> hardirq/softirq\n"
3229 "# || / _--=> preempt-depth\n"
3230 "# ||| / delay\n"
3231 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
3232 "# | | | |||| | |\n");
77271ce4 3233}
bc0c38d1 3234
62b915f1 3235void
bc0c38d1
SR
3236print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3237{
983f938a 3238 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
3239 struct trace_buffer *buf = iter->trace_buffer;
3240 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 3241 struct tracer *type = iter->trace;
39eaf7ef
SR
3242 unsigned long entries;
3243 unsigned long total;
bc0c38d1
SR
3244 const char *name = "preemption";
3245
d840f718 3246 name = type->name;
bc0c38d1 3247
12883efb 3248 get_total_entries(buf, &total, &entries);
bc0c38d1 3249
888b55dc 3250 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 3251 name, UTS_RELEASE);
888b55dc 3252 seq_puts(m, "# -----------------------------------"
bc0c38d1 3253 "---------------------------------\n");
888b55dc 3254 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 3255 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 3256 nsecs_to_usecs(data->saved_latency),
bc0c38d1 3257 entries,
4c11d7ae 3258 total,
12883efb 3259 buf->cpu,
bc0c38d1
SR
3260#if defined(CONFIG_PREEMPT_NONE)
3261 "server",
3262#elif defined(CONFIG_PREEMPT_VOLUNTARY)
3263 "desktop",
b5c21b45 3264#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
3265 "preempt",
3266#else
3267 "unknown",
3268#endif
3269 /* These are reserved for later use */
3270 0, 0, 0, 0);
3271#ifdef CONFIG_SMP
3272 seq_printf(m, " #P:%d)\n", num_online_cpus());
3273#else
3274 seq_puts(m, ")\n");
3275#endif
888b55dc
KM
3276 seq_puts(m, "# -----------------\n");
3277 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 3278 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
3279 data->comm, data->pid,
3280 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 3281 data->policy, data->rt_priority);
888b55dc 3282 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
3283
3284 if (data->critical_start) {
888b55dc 3285 seq_puts(m, "# => started at: ");
214023c3
SR
3286 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3287 trace_print_seq(m, &iter->seq);
888b55dc 3288 seq_puts(m, "\n# => ended at: ");
214023c3
SR
3289 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3290 trace_print_seq(m, &iter->seq);
8248ac05 3291 seq_puts(m, "\n#\n");
bc0c38d1
SR
3292 }
3293
888b55dc 3294 seq_puts(m, "#\n");
bc0c38d1
SR
3295}
3296
a309720c
SR
3297static void test_cpu_buff_start(struct trace_iterator *iter)
3298{
3299 struct trace_seq *s = &iter->seq;
983f938a 3300 struct trace_array *tr = iter->tr;
a309720c 3301
983f938a 3302 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
12ef7d44
SR
3303 return;
3304
3305 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3306 return;
3307
919cd979 3308 if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
3309 return;
3310
12883efb 3311 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
3312 return;
3313
919cd979
SL
3314 if (iter->started)
3315 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
3316
3317 /* Don't print started cpu buffer for the first entry of the trace */
3318 if (iter->idx > 1)
3319 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3320 iter->cpu);
a309720c
SR
3321}
3322
2c4f035f 3323static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 3324{
983f938a 3325 struct trace_array *tr = iter->tr;
214023c3 3326 struct trace_seq *s = &iter->seq;
983f938a 3327 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 3328 struct trace_entry *entry;
f633cef0 3329 struct trace_event *event;
bc0c38d1 3330
4e3c3333 3331 entry = iter->ent;
dd0e545f 3332
a309720c
SR
3333 test_cpu_buff_start(iter);
3334
c4a8e8be 3335 event = ftrace_find_event(entry->type);
bc0c38d1 3336
983f938a 3337 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3338 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3339 trace_print_lat_context(iter);
3340 else
3341 trace_print_context(iter);
c4a8e8be 3342 }
bc0c38d1 3343
19a7fe20
SRRH
3344 if (trace_seq_has_overflowed(s))
3345 return TRACE_TYPE_PARTIAL_LINE;
3346
268ccda0 3347 if (event)
a9a57763 3348 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 3349
19a7fe20 3350 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 3351
19a7fe20 3352 return trace_handle_return(s);
bc0c38d1
SR
3353}
3354
2c4f035f 3355static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3 3356{
983f938a 3357 struct trace_array *tr = iter->tr;
f9896bf3
IM
3358 struct trace_seq *s = &iter->seq;
3359 struct trace_entry *entry;
f633cef0 3360 struct trace_event *event;
f9896bf3
IM
3361
3362 entry = iter->ent;
dd0e545f 3363
983f938a 3364 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
19a7fe20
SRRH
3365 trace_seq_printf(s, "%d %d %llu ",
3366 entry->pid, iter->cpu, iter->ts);
3367
3368 if (trace_seq_has_overflowed(s))
3369 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 3370
f633cef0 3371 event = ftrace_find_event(entry->type);
268ccda0 3372 if (event)
a9a57763 3373 return event->funcs->raw(iter, 0, event);
d9793bd8 3374
19a7fe20 3375 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 3376
19a7fe20 3377 return trace_handle_return(s);
f9896bf3
IM
3378}
3379
2c4f035f 3380static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec 3381{
983f938a 3382 struct trace_array *tr = iter->tr;
5e3ca0ec
IM
3383 struct trace_seq *s = &iter->seq;
3384 unsigned char newline = '\n';
3385 struct trace_entry *entry;
f633cef0 3386 struct trace_event *event;
5e3ca0ec
IM
3387
3388 entry = iter->ent;
dd0e545f 3389
983f938a 3390 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3391 SEQ_PUT_HEX_FIELD(s, entry->pid);
3392 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3393 SEQ_PUT_HEX_FIELD(s, iter->ts);
3394 if (trace_seq_has_overflowed(s))
3395 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3396 }
5e3ca0ec 3397
f633cef0 3398 event = ftrace_find_event(entry->type);
268ccda0 3399 if (event) {
a9a57763 3400 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
3401 if (ret != TRACE_TYPE_HANDLED)
3402 return ret;
3403 }
7104f300 3404
19a7fe20 3405 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 3406
19a7fe20 3407 return trace_handle_return(s);
5e3ca0ec
IM
3408}
3409
2c4f035f 3410static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa 3411{
983f938a 3412 struct trace_array *tr = iter->tr;
cb0f12aa
IM
3413 struct trace_seq *s = &iter->seq;
3414 struct trace_entry *entry;
f633cef0 3415 struct trace_event *event;
cb0f12aa
IM
3416
3417 entry = iter->ent;
dd0e545f 3418
983f938a 3419 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
3420 SEQ_PUT_FIELD(s, entry->pid);
3421 SEQ_PUT_FIELD(s, iter->cpu);
3422 SEQ_PUT_FIELD(s, iter->ts);
3423 if (trace_seq_has_overflowed(s))
3424 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 3425 }
cb0f12aa 3426
f633cef0 3427 event = ftrace_find_event(entry->type);
a9a57763
SR
3428 return event ? event->funcs->binary(iter, 0, event) :
3429 TRACE_TYPE_HANDLED;
cb0f12aa
IM
3430}
3431
62b915f1 3432int trace_empty(struct trace_iterator *iter)
bc0c38d1 3433{
6d158a81 3434 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
3435 int cpu;
3436
9aba60fe 3437 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 3438 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 3439 cpu = iter->cpu_file;
6d158a81
SR
3440 buf_iter = trace_buffer_iter(iter, cpu);
3441 if (buf_iter) {
3442 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
3443 return 0;
3444 } else {
12883efb 3445 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
3446 return 0;
3447 }
3448 return 1;
3449 }
3450
ab46428c 3451 for_each_tracing_cpu(cpu) {
6d158a81
SR
3452 buf_iter = trace_buffer_iter(iter, cpu);
3453 if (buf_iter) {
3454 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
3455 return 0;
3456 } else {
12883efb 3457 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
3458 return 0;
3459 }
bc0c38d1 3460 }
d769041f 3461
797d3712 3462 return 1;
bc0c38d1
SR
3463}
3464
4f535968 3465/* Called with trace_event_read_lock() held. */
955b61e5 3466enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 3467{
983f938a
SRRH
3468 struct trace_array *tr = iter->tr;
3469 unsigned long trace_flags = tr->trace_flags;
2c4f035f
FW
3470 enum print_line_t ret;
3471
19a7fe20
SRRH
3472 if (iter->lost_events) {
3473 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3474 iter->cpu, iter->lost_events);
3475 if (trace_seq_has_overflowed(&iter->seq))
3476 return TRACE_TYPE_PARTIAL_LINE;
3477 }
bc21b478 3478
2c4f035f
FW
3479 if (iter->trace && iter->trace->print_line) {
3480 ret = iter->trace->print_line(iter);
3481 if (ret != TRACE_TYPE_UNHANDLED)
3482 return ret;
3483 }
72829bc3 3484
09ae7234
SRRH
3485 if (iter->ent->type == TRACE_BPUTS &&
3486 trace_flags & TRACE_ITER_PRINTK &&
3487 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3488 return trace_print_bputs_msg_only(iter);
3489
48ead020
FW
3490 if (iter->ent->type == TRACE_BPRINT &&
3491 trace_flags & TRACE_ITER_PRINTK &&
3492 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3493 return trace_print_bprintk_msg_only(iter);
48ead020 3494
66896a85
FW
3495 if (iter->ent->type == TRACE_PRINT &&
3496 trace_flags & TRACE_ITER_PRINTK &&
3497 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 3498 return trace_print_printk_msg_only(iter);
66896a85 3499
cb0f12aa
IM
3500 if (trace_flags & TRACE_ITER_BIN)
3501 return print_bin_fmt(iter);
3502
5e3ca0ec
IM
3503 if (trace_flags & TRACE_ITER_HEX)
3504 return print_hex_fmt(iter);
3505
f9896bf3
IM
3506 if (trace_flags & TRACE_ITER_RAW)
3507 return print_raw_fmt(iter);
3508
f9896bf3
IM
3509 return print_trace_fmt(iter);
3510}
3511
7e9a49ef
JO
3512void trace_latency_header(struct seq_file *m)
3513{
3514 struct trace_iterator *iter = m->private;
983f938a 3515 struct trace_array *tr = iter->tr;
7e9a49ef
JO
3516
3517 /* print nothing if the buffers are empty */
3518 if (trace_empty(iter))
3519 return;
3520
3521 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3522 print_trace_header(m, iter);
3523
983f938a 3524 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
7e9a49ef
JO
3525 print_lat_help_header(m);
3526}
3527
62b915f1
JO
3528void trace_default_header(struct seq_file *m)
3529{
3530 struct trace_iterator *iter = m->private;
983f938a
SRRH
3531 struct trace_array *tr = iter->tr;
3532 unsigned long trace_flags = tr->trace_flags;
62b915f1 3533
f56e7f8e
JO
3534 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3535 return;
3536
62b915f1
JO
3537 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3538 /* print nothing if the buffers are empty */
3539 if (trace_empty(iter))
3540 return;
3541 print_trace_header(m, iter);
3542 if (!(trace_flags & TRACE_ITER_VERBOSE))
3543 print_lat_help_header(m);
3544 } else {
77271ce4
SR
3545 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3546 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 3547 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 3548 else
12883efb 3549 print_func_help_header(iter->trace_buffer, m);
77271ce4 3550 }
62b915f1
JO
3551 }
3552}
3553
e0a413f6
SR
3554static void test_ftrace_alive(struct seq_file *m)
3555{
3556 if (!ftrace_is_dead())
3557 return;
d79ac28f
RV
3558 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3559 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
3560}
3561
d8741e2e 3562#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 3563static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 3564{
d79ac28f
RV
3565 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3566 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3567 "# Takes a snapshot of the main buffer.\n"
3568 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3569 "# (Doesn't have to be '2' works with any number that\n"
3570 "# is not a '0' or '1')\n");
d8741e2e 3571}
f1affcaa
SRRH
3572
3573static void show_snapshot_percpu_help(struct seq_file *m)
3574{
fa6f0cc7 3575 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 3576#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
3577 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3578 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 3579#else
d79ac28f
RV
3580 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3581 "# Must use main snapshot file to allocate.\n");
f1affcaa 3582#endif
d79ac28f
RV
3583 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3584 "# (Doesn't have to be '2' works with any number that\n"
3585 "# is not a '0' or '1')\n");
f1affcaa
SRRH
3586}
3587
d8741e2e
SRRH
3588static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3589{
45ad21ca 3590 if (iter->tr->allocated_snapshot)
fa6f0cc7 3591 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 3592 else
fa6f0cc7 3593 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 3594
fa6f0cc7 3595 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
3596 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3597 show_snapshot_main_help(m);
3598 else
3599 show_snapshot_percpu_help(m);
d8741e2e
SRRH
3600}
3601#else
3602/* Should never be called */
3603static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3604#endif
3605
bc0c38d1
SR
3606static int s_show(struct seq_file *m, void *v)
3607{
3608 struct trace_iterator *iter = v;
a63ce5b3 3609 int ret;
bc0c38d1
SR
3610
3611 if (iter->ent == NULL) {
3612 if (iter->tr) {
3613 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3614 seq_puts(m, "#\n");
e0a413f6 3615 test_ftrace_alive(m);
bc0c38d1 3616 }
d8741e2e
SRRH
3617 if (iter->snapshot && trace_empty(iter))
3618 print_snapshot_help(m, iter);
3619 else if (iter->trace && iter->trace->print_header)
8bba1bf5 3620 iter->trace->print_header(m);
62b915f1
JO
3621 else
3622 trace_default_header(m);
3623
a63ce5b3
SR
3624 } else if (iter->leftover) {
3625 /*
3626 * If we filled the seq_file buffer earlier, we
3627 * want to just show it now.
3628 */
3629 ret = trace_print_seq(m, &iter->seq);
3630
3631 /* ret should this time be zero, but you never know */
3632 iter->leftover = ret;
3633
bc0c38d1 3634 } else {
f9896bf3 3635 print_trace_line(iter);
a63ce5b3
SR
3636 ret = trace_print_seq(m, &iter->seq);
3637 /*
3638 * If we overflow the seq_file buffer, then it will
3639 * ask us for this data again at start up.
3640 * Use that instead.
3641 * ret is 0 if seq_file write succeeded.
3642 * -1 otherwise.
3643 */
3644 iter->leftover = ret;
bc0c38d1
SR
3645 }
3646
3647 return 0;
3648}
3649
649e9c70
ON
3650/*
3651 * Should be used after trace_array_get(), trace_types_lock
3652 * ensures that i_cdev was already initialized.
3653 */
3654static inline int tracing_get_cpu(struct inode *inode)
3655{
3656 if (inode->i_cdev) /* See trace_create_cpu_file() */
3657 return (long)inode->i_cdev - 1;
3658 return RING_BUFFER_ALL_CPUS;
3659}
3660
88e9d34c 3661static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3662 .start = s_start,
3663 .next = s_next,
3664 .stop = s_stop,
3665 .show = s_show,
bc0c38d1
SR
3666};
3667
e309b41d 3668static struct trace_iterator *
6484c71c 3669__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3670{
6484c71c 3671 struct trace_array *tr = inode->i_private;
bc0c38d1 3672 struct trace_iterator *iter;
50e18b94 3673 int cpu;
bc0c38d1 3674
85a2f9b4
SR
3675 if (tracing_disabled)
3676 return ERR_PTR(-ENODEV);
60a11774 3677
50e18b94 3678 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3679 if (!iter)
3680 return ERR_PTR(-ENOMEM);
bc0c38d1 3681
72917235 3682 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
6d158a81 3683 GFP_KERNEL);
93574fcc
DC
3684 if (!iter->buffer_iter)
3685 goto release;
3686
d7350c3f
FW
3687 /*
3688 * We make a copy of the current tracer to avoid concurrent
3689 * changes on it while we are reading.
3690 */
bc0c38d1 3691 mutex_lock(&trace_types_lock);
d7350c3f 3692 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3693 if (!iter->trace)
d7350c3f 3694 goto fail;
85a2f9b4 3695
2b6080f2 3696 *iter->trace = *tr->current_trace;
d7350c3f 3697
79f55997 3698 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3699 goto fail;
3700
12883efb
SRRH
3701 iter->tr = tr;
3702
3703#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3704 /* Currently only the top directory has a snapshot */
3705 if (tr->current_trace->print_max || snapshot)
12883efb 3706 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3707 else
12883efb
SRRH
3708#endif
3709 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3710 iter->snapshot = snapshot;
bc0c38d1 3711 iter->pos = -1;
6484c71c 3712 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3713 mutex_init(&iter->mutex);
bc0c38d1 3714
8bba1bf5
MM
3715 /* Notify the tracer early; before we stop tracing. */
3716 if (iter->trace && iter->trace->open)
a93751ca 3717 iter->trace->open(iter);
8bba1bf5 3718
12ef7d44 3719 /* Annotate start of buffers if we had overruns */
12883efb 3720 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3721 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3722
8be0709f 3723 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3724 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3725 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3726
debdd57f
HT
3727 /* stop the trace while dumping if we are not opening "snapshot" */
3728 if (!iter->snapshot)
2b6080f2 3729 tracing_stop_tr(tr);
2f26ebd5 3730
ae3b5093 3731 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3732 for_each_tracing_cpu(cpu) {
b04cc6b1 3733 iter->buffer_iter[cpu] =
12883efb 3734 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3735 }
3736 ring_buffer_read_prepare_sync();
3737 for_each_tracing_cpu(cpu) {
3738 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3739 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3740 }
3741 } else {
3742 cpu = iter->cpu_file;
3928a8a2 3743 iter->buffer_iter[cpu] =
12883efb 3744 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3745 ring_buffer_read_prepare_sync();
3746 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3747 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3748 }
3749
bc0c38d1
SR
3750 mutex_unlock(&trace_types_lock);
3751
bc0c38d1 3752 return iter;
3928a8a2 3753
d7350c3f 3754 fail:
3928a8a2 3755 mutex_unlock(&trace_types_lock);
d7350c3f 3756 kfree(iter->trace);
6d158a81 3757 kfree(iter->buffer_iter);
93574fcc 3758release:
50e18b94
JO
3759 seq_release_private(inode, file);
3760 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3761}
3762
3763int tracing_open_generic(struct inode *inode, struct file *filp)
3764{
60a11774
SR
3765 if (tracing_disabled)
3766 return -ENODEV;
3767
bc0c38d1
SR
3768 filp->private_data = inode->i_private;
3769 return 0;
3770}
3771
2e86421d
GB
3772bool tracing_is_disabled(void)
3773{
3774 return (tracing_disabled) ? true: false;
3775}
3776
7b85af63
SRRH
3777/*
3778 * Open and update trace_array ref count.
3779 * Must have the current trace_array passed to it.
3780 */
dcc30223 3781static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3782{
3783 struct trace_array *tr = inode->i_private;
3784
3785 if (tracing_disabled)
3786 return -ENODEV;
3787
3788 if (trace_array_get(tr) < 0)
3789 return -ENODEV;
3790
3791 filp->private_data = inode->i_private;
3792
3793 return 0;
7b85af63
SRRH
3794}
3795
4fd27358 3796static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3797{
6484c71c 3798 struct trace_array *tr = inode->i_private;
907f2784 3799 struct seq_file *m = file->private_data;
4acd4d00 3800 struct trace_iterator *iter;
3928a8a2 3801 int cpu;
bc0c38d1 3802
ff451961 3803 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3804 trace_array_put(tr);
4acd4d00 3805 return 0;
ff451961 3806 }
4acd4d00 3807
6484c71c 3808 /* Writes do not use seq_file */
4acd4d00 3809 iter = m->private;
bc0c38d1 3810 mutex_lock(&trace_types_lock);
a695cb58 3811
3928a8a2
SR
3812 for_each_tracing_cpu(cpu) {
3813 if (iter->buffer_iter[cpu])
3814 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3815 }
3816
bc0c38d1
SR
3817 if (iter->trace && iter->trace->close)
3818 iter->trace->close(iter);
3819
debdd57f
HT
3820 if (!iter->snapshot)
3821 /* reenable tracing if it was previously enabled */
2b6080f2 3822 tracing_start_tr(tr);
f77d09a3
AL
3823
3824 __trace_array_put(tr);
3825
bc0c38d1
SR
3826 mutex_unlock(&trace_types_lock);
3827
d7350c3f 3828 mutex_destroy(&iter->mutex);
b0dfa978 3829 free_cpumask_var(iter->started);
d7350c3f 3830 kfree(iter->trace);
6d158a81 3831 kfree(iter->buffer_iter);
50e18b94 3832 seq_release_private(inode, file);
ff451961 3833
bc0c38d1
SR
3834 return 0;
3835}
3836
7b85af63
SRRH
3837static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3838{
3839 struct trace_array *tr = inode->i_private;
3840
3841 trace_array_put(tr);
bc0c38d1
SR
3842 return 0;
3843}
3844
7b85af63
SRRH
3845static int tracing_single_release_tr(struct inode *inode, struct file *file)
3846{
3847 struct trace_array *tr = inode->i_private;
3848
3849 trace_array_put(tr);
3850
3851 return single_release(inode, file);
3852}
3853
bc0c38d1
SR
3854static int tracing_open(struct inode *inode, struct file *file)
3855{
6484c71c 3856 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3857 struct trace_iterator *iter;
3858 int ret = 0;
bc0c38d1 3859
ff451961
SRRH
3860 if (trace_array_get(tr) < 0)
3861 return -ENODEV;
3862
4acd4d00 3863 /* If this file was open for write, then erase contents */
6484c71c
ON
3864 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3865 int cpu = tracing_get_cpu(inode);
3866
3867 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3868 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3869 else
6484c71c 3870 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3871 }
bc0c38d1 3872
4acd4d00 3873 if (file->f_mode & FMODE_READ) {
6484c71c 3874 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3875 if (IS_ERR(iter))
3876 ret = PTR_ERR(iter);
983f938a 3877 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4acd4d00
SR
3878 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3879 }
ff451961
SRRH
3880
3881 if (ret < 0)
3882 trace_array_put(tr);
3883
bc0c38d1
SR
3884 return ret;
3885}
3886
607e2ea1
SRRH
3887/*
3888 * Some tracers are not suitable for instance buffers.
3889 * A tracer is always available for the global array (toplevel)
3890 * or if it explicitly states that it is.
3891 */
3892static bool
3893trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3894{
3895 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3896}
3897
3898/* Find the next tracer that this trace array may use */
3899static struct tracer *
3900get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3901{
3902 while (t && !trace_ok_for_array(t, tr))
3903 t = t->next;
3904
3905 return t;
3906}
3907
e309b41d 3908static void *
bc0c38d1
SR
3909t_next(struct seq_file *m, void *v, loff_t *pos)
3910{
607e2ea1 3911 struct trace_array *tr = m->private;
f129e965 3912 struct tracer *t = v;
bc0c38d1
SR
3913
3914 (*pos)++;
3915
3916 if (t)
607e2ea1 3917 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3918
bc0c38d1
SR
3919 return t;
3920}
3921
3922static void *t_start(struct seq_file *m, loff_t *pos)
3923{
607e2ea1 3924 struct trace_array *tr = m->private;
f129e965 3925 struct tracer *t;
bc0c38d1
SR
3926 loff_t l = 0;
3927
3928 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3929
3930 t = get_tracer_for_array(tr, trace_types);
3931 for (; t && l < *pos; t = t_next(m, t, &l))
3932 ;
bc0c38d1
SR
3933
3934 return t;
3935}
3936
3937static void t_stop(struct seq_file *m, void *p)
3938{
3939 mutex_unlock(&trace_types_lock);
3940}
3941
3942static int t_show(struct seq_file *m, void *v)
3943{
3944 struct tracer *t = v;
3945
3946 if (!t)
3947 return 0;
3948
fa6f0cc7 3949 seq_puts(m, t->name);
bc0c38d1
SR
3950 if (t->next)
3951 seq_putc(m, ' ');
3952 else
3953 seq_putc(m, '\n');
3954
3955 return 0;
3956}
3957
88e9d34c 3958static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3959 .start = t_start,
3960 .next = t_next,
3961 .stop = t_stop,
3962 .show = t_show,
bc0c38d1
SR
3963};
3964
3965static int show_traces_open(struct inode *inode, struct file *file)
3966{
607e2ea1
SRRH
3967 struct trace_array *tr = inode->i_private;
3968 struct seq_file *m;
3969 int ret;
3970
60a11774
SR
3971 if (tracing_disabled)
3972 return -ENODEV;
3973
607e2ea1
SRRH
3974 ret = seq_open(file, &show_traces_seq_ops);
3975 if (ret)
3976 return ret;
3977
3978 m = file->private_data;
3979 m->private = tr;
3980
3981 return 0;
bc0c38d1
SR
3982}
3983
4acd4d00
SR
3984static ssize_t
3985tracing_write_stub(struct file *filp, const char __user *ubuf,
3986 size_t count, loff_t *ppos)
3987{
3988 return count;
3989}
3990
098c879e 3991loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3992{
098c879e
SRRH
3993 int ret;
3994
364829b1 3995 if (file->f_mode & FMODE_READ)
098c879e 3996 ret = seq_lseek(file, offset, whence);
364829b1 3997 else
098c879e
SRRH
3998 file->f_pos = ret = 0;
3999
4000 return ret;
364829b1
SP
4001}
4002
5e2336a0 4003static const struct file_operations tracing_fops = {
4bf39a94
IM
4004 .open = tracing_open,
4005 .read = seq_read,
4acd4d00 4006 .write = tracing_write_stub,
098c879e 4007 .llseek = tracing_lseek,
4bf39a94 4008 .release = tracing_release,
bc0c38d1
SR
4009};
4010
5e2336a0 4011static const struct file_operations show_traces_fops = {
c7078de1
IM
4012 .open = show_traces_open,
4013 .read = seq_read,
4014 .release = seq_release,
b444786f 4015 .llseek = seq_lseek,
c7078de1
IM
4016};
4017
36dfe925
IM
4018/*
4019 * The tracer itself will not take this lock, but still we want
4020 * to provide a consistent cpumask to user-space:
4021 */
4022static DEFINE_MUTEX(tracing_cpumask_update_lock);
4023
4024/*
4025 * Temporary storage for the character representation of the
4026 * CPU bitmask (and one more byte for the newline):
4027 */
4028static char mask_str[NR_CPUS + 1];
4029
c7078de1
IM
4030static ssize_t
4031tracing_cpumask_read(struct file *filp, char __user *ubuf,
4032 size_t count, loff_t *ppos)
4033{
ccfe9e42 4034 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 4035 int len;
c7078de1
IM
4036
4037 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 4038
1a40243b
TH
4039 len = snprintf(mask_str, count, "%*pb\n",
4040 cpumask_pr_args(tr->tracing_cpumask));
4041 if (len >= count) {
36dfe925
IM
4042 count = -EINVAL;
4043 goto out_err;
4044 }
36dfe925
IM
4045 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
4046
4047out_err:
c7078de1
IM
4048 mutex_unlock(&tracing_cpumask_update_lock);
4049
4050 return count;
4051}
4052
4053static ssize_t
4054tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4055 size_t count, loff_t *ppos)
4056{
ccfe9e42 4057 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 4058 cpumask_var_t tracing_cpumask_new;
2b6080f2 4059 int err, cpu;
9e01c1b7
RR
4060
4061 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4062 return -ENOMEM;
c7078de1 4063
9e01c1b7 4064 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 4065 if (err)
36dfe925
IM
4066 goto err_unlock;
4067
215368e8
LZ
4068 mutex_lock(&tracing_cpumask_update_lock);
4069
a5e25883 4070 local_irq_disable();
0b9b12c1 4071 arch_spin_lock(&tr->max_lock);
ab46428c 4072 for_each_tracing_cpu(cpu) {
36dfe925
IM
4073 /*
4074 * Increase/decrease the disabled counter if we are
4075 * about to flip a bit in the cpumask:
4076 */
ccfe9e42 4077 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4078 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4079 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4080 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 4081 }
ccfe9e42 4082 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 4083 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
4084 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4085 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
4086 }
4087 }
0b9b12c1 4088 arch_spin_unlock(&tr->max_lock);
a5e25883 4089 local_irq_enable();
36dfe925 4090
ccfe9e42 4091 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
4092
4093 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 4094 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
4095
4096 return count;
36dfe925
IM
4097
4098err_unlock:
215368e8 4099 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
4100
4101 return err;
c7078de1
IM
4102}
4103
5e2336a0 4104static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 4105 .open = tracing_open_generic_tr,
c7078de1
IM
4106 .read = tracing_cpumask_read,
4107 .write = tracing_cpumask_write,
ccfe9e42 4108 .release = tracing_release_generic_tr,
b444786f 4109 .llseek = generic_file_llseek,
bc0c38d1
SR
4110};
4111
fdb372ed 4112static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 4113{
d8e83d26 4114 struct tracer_opt *trace_opts;
2b6080f2 4115 struct trace_array *tr = m->private;
d8e83d26 4116 u32 tracer_flags;
d8e83d26 4117 int i;
adf9f195 4118
d8e83d26 4119 mutex_lock(&trace_types_lock);
2b6080f2
SR
4120 tracer_flags = tr->current_trace->flags->val;
4121 trace_opts = tr->current_trace->flags->opts;
d8e83d26 4122
bc0c38d1 4123 for (i = 0; trace_options[i]; i++) {
983f938a 4124 if (tr->trace_flags & (1 << i))
fdb372ed 4125 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 4126 else
fdb372ed 4127 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
4128 }
4129
adf9f195
FW
4130 for (i = 0; trace_opts[i].name; i++) {
4131 if (tracer_flags & trace_opts[i].bit)
fdb372ed 4132 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 4133 else
fdb372ed 4134 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 4135 }
d8e83d26 4136 mutex_unlock(&trace_types_lock);
adf9f195 4137
fdb372ed 4138 return 0;
bc0c38d1 4139}
bc0c38d1 4140
8c1a49ae 4141static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
4142 struct tracer_flags *tracer_flags,
4143 struct tracer_opt *opts, int neg)
4144{
d39cdd20 4145 struct tracer *trace = tracer_flags->trace;
8d18eaaf 4146 int ret;
bc0c38d1 4147
8c1a49ae 4148 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
4149 if (ret)
4150 return ret;
4151
4152 if (neg)
4153 tracer_flags->val &= ~opts->bit;
4154 else
4155 tracer_flags->val |= opts->bit;
4156 return 0;
bc0c38d1
SR
4157}
4158
adf9f195 4159/* Try to assign a tracer specific option */
8c1a49ae 4160static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 4161{
8c1a49ae 4162 struct tracer *trace = tr->current_trace;
7770841e 4163 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 4164 struct tracer_opt *opts = NULL;
8d18eaaf 4165 int i;
adf9f195 4166
7770841e
Z
4167 for (i = 0; tracer_flags->opts[i].name; i++) {
4168 opts = &tracer_flags->opts[i];
adf9f195 4169
8d18eaaf 4170 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 4171 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 4172 }
adf9f195 4173
8d18eaaf 4174 return -EINVAL;
adf9f195
FW
4175}
4176
613f04a0
SRRH
4177/* Some tracers require overwrite to stay enabled */
4178int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4179{
4180 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4181 return -1;
4182
4183 return 0;
4184}
4185
2b6080f2 4186int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
4187{
4188 /* do nothing if flag is already set */
983f938a 4189 if (!!(tr->trace_flags & mask) == !!enabled)
613f04a0
SRRH
4190 return 0;
4191
4192 /* Give the tracer a chance to approve the change */
2b6080f2 4193 if (tr->current_trace->flag_changed)
bf6065b5 4194 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 4195 return -EINVAL;
af4617bd
SR
4196
4197 if (enabled)
983f938a 4198 tr->trace_flags |= mask;
af4617bd 4199 else
983f938a 4200 tr->trace_flags &= ~mask;
e870e9a1
LZ
4201
4202 if (mask == TRACE_ITER_RECORD_CMD)
4203 trace_event_enable_cmd_record(enabled);
750912fa 4204
c37775d5
SR
4205 if (mask == TRACE_ITER_EVENT_FORK)
4206 trace_event_follow_fork(tr, enabled);
4207
1e10486f
NK
4208 if (mask == TRACE_ITER_FUNC_FORK)
4209 ftrace_pid_follow_fork(tr, enabled);
4210
80902822 4211 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 4212 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 4213#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 4214 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
4215#endif
4216 }
81698831 4217
b9f9108c 4218 if (mask == TRACE_ITER_PRINTK) {
81698831 4219 trace_printk_start_stop_comm(enabled);
b9f9108c
SRRH
4220 trace_printk_control(enabled);
4221 }
613f04a0
SRRH
4222
4223 return 0;
af4617bd
SR
4224}
4225
2b6080f2 4226static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 4227{
8d18eaaf 4228 char *cmp;
bc0c38d1 4229 int neg = 0;
613f04a0 4230 int ret = -ENODEV;
bc0c38d1 4231 int i;
a4d1e688 4232 size_t orig_len = strlen(option);
bc0c38d1 4233
7bcfaf54 4234 cmp = strstrip(option);
bc0c38d1 4235
8d18eaaf 4236 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
4237 neg = 1;
4238 cmp += 2;
4239 }
4240
69d34da2
SRRH
4241 mutex_lock(&trace_types_lock);
4242
bc0c38d1 4243 for (i = 0; trace_options[i]; i++) {
8d18eaaf 4244 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 4245 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
4246 break;
4247 }
4248 }
adf9f195
FW
4249
4250 /* If no option could be set, test the specific tracer options */
69d34da2 4251 if (!trace_options[i])
8c1a49ae 4252 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
4253
4254 mutex_unlock(&trace_types_lock);
bc0c38d1 4255
a4d1e688
JW
4256 /*
4257 * If the first trailing whitespace is replaced with '\0' by strstrip,
4258 * turn it back into a space.
4259 */
4260 if (orig_len > strlen(option))
4261 option[strlen(option)] = ' ';
4262
7bcfaf54
SR
4263 return ret;
4264}
4265
a4d1e688
JW
4266static void __init apply_trace_boot_options(void)
4267{
4268 char *buf = trace_boot_options_buf;
4269 char *option;
4270
4271 while (true) {
4272 option = strsep(&buf, ",");
4273
4274 if (!option)
4275 break;
a4d1e688 4276
43ed3843
SRRH
4277 if (*option)
4278 trace_set_options(&global_trace, option);
a4d1e688
JW
4279
4280 /* Put back the comma to allow this to be called again */
4281 if (buf)
4282 *(buf - 1) = ',';
4283 }
4284}
4285
7bcfaf54
SR
4286static ssize_t
4287tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4288 size_t cnt, loff_t *ppos)
4289{
2b6080f2
SR
4290 struct seq_file *m = filp->private_data;
4291 struct trace_array *tr = m->private;
7bcfaf54 4292 char buf[64];
613f04a0 4293 int ret;
7bcfaf54
SR
4294
4295 if (cnt >= sizeof(buf))
4296 return -EINVAL;
4297
4afe6495 4298 if (copy_from_user(buf, ubuf, cnt))
7bcfaf54
SR
4299 return -EFAULT;
4300
a8dd2176
SR
4301 buf[cnt] = 0;
4302
2b6080f2 4303 ret = trace_set_options(tr, buf);
613f04a0
SRRH
4304 if (ret < 0)
4305 return ret;
7bcfaf54 4306
cf8517cf 4307 *ppos += cnt;
bc0c38d1
SR
4308
4309 return cnt;
4310}
4311
fdb372ed
LZ
4312static int tracing_trace_options_open(struct inode *inode, struct file *file)
4313{
7b85af63 4314 struct trace_array *tr = inode->i_private;
f77d09a3 4315 int ret;
7b85af63 4316
fdb372ed
LZ
4317 if (tracing_disabled)
4318 return -ENODEV;
2b6080f2 4319
7b85af63
SRRH
4320 if (trace_array_get(tr) < 0)
4321 return -ENODEV;
4322
f77d09a3
AL
4323 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4324 if (ret < 0)
4325 trace_array_put(tr);
4326
4327 return ret;
fdb372ed
LZ
4328}
4329
5e2336a0 4330static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
4331 .open = tracing_trace_options_open,
4332 .read = seq_read,
4333 .llseek = seq_lseek,
7b85af63 4334 .release = tracing_single_release_tr,
ee6bce52 4335 .write = tracing_trace_options_write,
bc0c38d1
SR
4336};
4337
7bd2f24c
IM
4338static const char readme_msg[] =
4339 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
4340 "# echo 0 > tracing_on : quick way to disable tracing\n"
4341 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4342 " Important files:\n"
4343 " trace\t\t\t- The static contents of the buffer\n"
4344 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4345 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4346 " current_tracer\t- function and latency tracers\n"
4347 " available_tracers\t- list of configured tracers for current_tracer\n"
4348 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4349 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4350 " trace_clock\t\t-change the clock used to order events\n"
4351 " local: Per cpu clock but may not be synced across CPUs\n"
4352 " global: Synced across CPUs but slows tracing down.\n"
4353 " counter: Not a clock, but just an increment\n"
4354 " uptime: Jiffy counter from time of boot\n"
4355 " perf: Same clock that perf events use\n"
4356#ifdef CONFIG_X86_64
4357 " x86-tsc: TSC cycle counter\n"
4358#endif
4359 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
fa32e855 4360 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
22f45649
SRRH
4361 " tracing_cpumask\t- Limit which CPUs to trace\n"
4362 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4363 "\t\t\t Remove sub-buffer with rmdir\n"
4364 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
4365 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4366 "\t\t\t option name\n"
939c7a4f 4367 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
4368#ifdef CONFIG_DYNAMIC_FTRACE
4369 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
4370 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4371 "\t\t\t functions\n"
60f1d5e3 4372 "\t accepts: func_full_name or glob-matching-pattern\n"
71485c45
SRRH
4373 "\t modules: Can select a group via module\n"
4374 "\t Format: :mod:<module-name>\n"
4375 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4376 "\t triggers: a command to perform when function is hit\n"
4377 "\t Format: <function>:<trigger>[:count]\n"
4378 "\t trigger: traceon, traceoff\n"
4379 "\t\t enable_event:<system>:<event>\n"
4380 "\t\t disable_event:<system>:<event>\n"
22f45649 4381#ifdef CONFIG_STACKTRACE
71485c45 4382 "\t\t stacktrace\n"
22f45649
SRRH
4383#endif
4384#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4385 "\t\t snapshot\n"
22f45649 4386#endif
17a280ea
SRRH
4387 "\t\t dump\n"
4388 "\t\t cpudump\n"
71485c45
SRRH
4389 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4390 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4391 "\t The first one will disable tracing every time do_fault is hit\n"
4392 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4393 "\t The first time do trap is hit and it disables tracing, the\n"
4394 "\t counter will decrement to 2. If tracing is already disabled,\n"
4395 "\t the counter will not decrement. It only decrements when the\n"
4396 "\t trigger did work\n"
4397 "\t To remove trigger without count:\n"
4398 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4399 "\t To remove trigger with a count:\n"
4400 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 4401 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
4402 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4403 "\t modules: Can select a group via module command :mod:\n"
4404 "\t Does not accept triggers\n"
22f45649
SRRH
4405#endif /* CONFIG_DYNAMIC_FTRACE */
4406#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
4407 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4408 "\t\t (function)\n"
22f45649
SRRH
4409#endif
4410#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4411 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 4412 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
4413 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4414#endif
4415#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
4416 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4417 "\t\t\t snapshot buffer. Read the contents for more\n"
4418 "\t\t\t information\n"
22f45649 4419#endif
991821c8 4420#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
4421 " stack_trace\t\t- Shows the max stack trace when active\n"
4422 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
4423 "\t\t\t Write into this file to reset the max size (trigger a\n"
4424 "\t\t\t new trace)\n"
22f45649 4425#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
4426 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4427 "\t\t\t traces\n"
22f45649 4428#endif
991821c8 4429#endif /* CONFIG_STACK_TRACER */
6b0b7551 4430#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4431 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4432 "\t\t\t Write into this file to define/undefine new trace events.\n"
4433#endif
6b0b7551 4434#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4435 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4436 "\t\t\t Write into this file to define/undefine new trace events.\n"
4437#endif
6b0b7551 4438#if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
86425625
MH
4439 "\t accepts: event-definitions (one definition per line)\n"
4440 "\t Format: p|r[:[<group>/]<event>] <place> [<args>]\n"
4441 "\t -:[<group>/]<event>\n"
6b0b7551 4442#ifdef CONFIG_KPROBE_EVENTS
86425625
MH
4443 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4444#endif
6b0b7551 4445#ifdef CONFIG_UPROBE_EVENTS
86425625
MH
4446 "\t place: <path>:<offset>\n"
4447#endif
4448 "\t args: <name>=fetcharg[:type]\n"
4449 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4450 "\t $stack<index>, $stack, $retval, $comm\n"
4451 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string,\n"
4452 "\t b<bit-width>@<bit-offset>/<container-size>\n"
4453#endif
26f25564
TZ
4454 " events/\t\t- Directory containing all trace event subsystems:\n"
4455 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4456 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
4457 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4458 "\t\t\t events\n"
26f25564 4459 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
4460 " events/<system>/<event>/\t- Directory containing control files for\n"
4461 "\t\t\t <event>:\n"
26f25564
TZ
4462 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4463 " filter\t\t- If set, only events passing filter are traced\n"
4464 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
4465 "\t Format: <trigger>[:count][if <filter>]\n"
4466 "\t trigger: traceon, traceoff\n"
4467 "\t enable_event:<system>:<event>\n"
4468 "\t disable_event:<system>:<event>\n"
d0bad49b
TZ
4469#ifdef CONFIG_HIST_TRIGGERS
4470 "\t enable_hist:<system>:<event>\n"
4471 "\t disable_hist:<system>:<event>\n"
4472#endif
26f25564 4473#ifdef CONFIG_STACKTRACE
71485c45 4474 "\t\t stacktrace\n"
26f25564
TZ
4475#endif
4476#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 4477 "\t\t snapshot\n"
7ef224d1
TZ
4478#endif
4479#ifdef CONFIG_HIST_TRIGGERS
4480 "\t\t hist (see below)\n"
26f25564 4481#endif
71485c45
SRRH
4482 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4483 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4484 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4485 "\t events/block/block_unplug/trigger\n"
4486 "\t The first disables tracing every time block_unplug is hit.\n"
4487 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4488 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4489 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4490 "\t Like function triggers, the counter is only decremented if it\n"
4491 "\t enabled or disabled tracing.\n"
4492 "\t To remove a trigger without a count:\n"
4493 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4494 "\t To remove a trigger with a count:\n"
4495 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4496 "\t Filters can be ignored when removing a trigger.\n"
7ef224d1
TZ
4497#ifdef CONFIG_HIST_TRIGGERS
4498 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
76a3b0c8 4499 "\t Format: hist:keys=<field1[,field2,...]>\n"
f2606835 4500 "\t [:values=<field1[,field2,...]>]\n"
e62347d2 4501 "\t [:sort=<field1[,field2,...]>]\n"
7ef224d1 4502 "\t [:size=#entries]\n"
e86ae9ba 4503 "\t [:pause][:continue][:clear]\n"
5463bfda 4504 "\t [:name=histname1]\n"
7ef224d1
TZ
4505 "\t [if <filter>]\n\n"
4506 "\t When a matching event is hit, an entry is added to a hash\n"
f2606835
TZ
4507 "\t table using the key(s) and value(s) named, and the value of a\n"
4508 "\t sum called 'hitcount' is incremented. Keys and values\n"
4509 "\t correspond to fields in the event's format description. Keys\n"
69a0200c
TZ
4510 "\t can be any field, or the special string 'stacktrace'.\n"
4511 "\t Compound keys consisting of up to two fields can be specified\n"
4512 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4513 "\t fields. Sort keys consisting of up to two fields can be\n"
4514 "\t specified using the 'sort' keyword. The sort direction can\n"
4515 "\t be modified by appending '.descending' or '.ascending' to a\n"
4516 "\t sort field. The 'size' parameter can be used to specify more\n"
5463bfda
TZ
4517 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4518 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4519 "\t its histogram data will be shared with other triggers of the\n"
4520 "\t same name, and trigger hits will update this common data.\n\n"
7ef224d1 4521 "\t Reading the 'hist' file for the event will dump the hash\n"
52a7f16d
TZ
4522 "\t table in its entirety to stdout. If there are multiple hist\n"
4523 "\t triggers attached to an event, there will be a table for each\n"
5463bfda
TZ
4524 "\t trigger in the output. The table displayed for a named\n"
4525 "\t trigger will be the same as any other instance having the\n"
4526 "\t same name. The default format used to display a given field\n"
4527 "\t can be modified by appending any of the following modifiers\n"
4528 "\t to the field name, as applicable:\n\n"
c6afad49
TZ
4529 "\t .hex display a number as a hex value\n"
4530 "\t .sym display an address as a symbol\n"
6b4827ad 4531 "\t .sym-offset display an address as a symbol and offset\n"
31696198
TZ
4532 "\t .execname display a common_pid as a program name\n"
4533 "\t .syscall display a syscall id as a syscall name\n\n"
4b94f5b7 4534 "\t .log2 display log2 value rather than raw number\n\n"
83e99914
TZ
4535 "\t The 'pause' parameter can be used to pause an existing hist\n"
4536 "\t trigger or to start a hist trigger but not log any events\n"
4537 "\t until told to do so. 'continue' can be used to start or\n"
4538 "\t restart a paused hist trigger.\n\n"
e86ae9ba
TZ
4539 "\t The 'clear' parameter will clear the contents of a running\n"
4540 "\t hist trigger and leave its current paused/active state\n"
4541 "\t unchanged.\n\n"
d0bad49b
TZ
4542 "\t The enable_hist and disable_hist triggers can be used to\n"
4543 "\t have one event conditionally start and stop another event's\n"
4544 "\t already-attached hist trigger. The syntax is analagous to\n"
4545 "\t the enable_event and disable_event triggers.\n"
7ef224d1 4546#endif
7bd2f24c
IM
4547;
4548
4549static ssize_t
4550tracing_readme_read(struct file *filp, char __user *ubuf,
4551 size_t cnt, loff_t *ppos)
4552{
4553 return simple_read_from_buffer(ubuf, cnt, ppos,
4554 readme_msg, strlen(readme_msg));
4555}
4556
5e2336a0 4557static const struct file_operations tracing_readme_fops = {
c7078de1
IM
4558 .open = tracing_open_generic,
4559 .read = tracing_readme_read,
b444786f 4560 .llseek = generic_file_llseek,
7bd2f24c
IM
4561};
4562
42584c81
YY
4563static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4564{
4565 unsigned int *ptr = v;
69abe6a5 4566
42584c81
YY
4567 if (*pos || m->count)
4568 ptr++;
69abe6a5 4569
42584c81 4570 (*pos)++;
69abe6a5 4571
939c7a4f
YY
4572 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4573 ptr++) {
42584c81
YY
4574 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4575 continue;
69abe6a5 4576
42584c81
YY
4577 return ptr;
4578 }
69abe6a5 4579
42584c81
YY
4580 return NULL;
4581}
4582
4583static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4584{
4585 void *v;
4586 loff_t l = 0;
69abe6a5 4587
4c27e756
SRRH
4588 preempt_disable();
4589 arch_spin_lock(&trace_cmdline_lock);
4590
939c7a4f 4591 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
4592 while (l <= *pos) {
4593 v = saved_cmdlines_next(m, v, &l);
4594 if (!v)
4595 return NULL;
69abe6a5
AP
4596 }
4597
42584c81
YY
4598 return v;
4599}
4600
4601static void saved_cmdlines_stop(struct seq_file *m, void *v)
4602{
4c27e756
SRRH
4603 arch_spin_unlock(&trace_cmdline_lock);
4604 preempt_enable();
42584c81 4605}
69abe6a5 4606
42584c81
YY
4607static int saved_cmdlines_show(struct seq_file *m, void *v)
4608{
4609 char buf[TASK_COMM_LEN];
4610 unsigned int *pid = v;
69abe6a5 4611
4c27e756 4612 __trace_find_cmdline(*pid, buf);
42584c81
YY
4613 seq_printf(m, "%d %s\n", *pid, buf);
4614 return 0;
4615}
4616
4617static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4618 .start = saved_cmdlines_start,
4619 .next = saved_cmdlines_next,
4620 .stop = saved_cmdlines_stop,
4621 .show = saved_cmdlines_show,
4622};
4623
4624static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4625{
4626 if (tracing_disabled)
4627 return -ENODEV;
4628
4629 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
4630}
4631
4632static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
4633 .open = tracing_saved_cmdlines_open,
4634 .read = seq_read,
4635 .llseek = seq_lseek,
4636 .release = seq_release,
69abe6a5
AP
4637};
4638
939c7a4f
YY
4639static ssize_t
4640tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4641 size_t cnt, loff_t *ppos)
4642{
4643 char buf[64];
4644 int r;
4645
4646 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 4647 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
4648 arch_spin_unlock(&trace_cmdline_lock);
4649
4650 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4651}
4652
4653static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4654{
4655 kfree(s->saved_cmdlines);
4656 kfree(s->map_cmdline_to_pid);
4657 kfree(s);
4658}
4659
4660static int tracing_resize_saved_cmdlines(unsigned int val)
4661{
4662 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4663
a6af8fbf 4664 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
4665 if (!s)
4666 return -ENOMEM;
4667
4668 if (allocate_cmdlines_buffer(val, s) < 0) {
4669 kfree(s);
4670 return -ENOMEM;
4671 }
4672
4673 arch_spin_lock(&trace_cmdline_lock);
4674 savedcmd_temp = savedcmd;
4675 savedcmd = s;
4676 arch_spin_unlock(&trace_cmdline_lock);
4677 free_saved_cmdlines_buffer(savedcmd_temp);
4678
4679 return 0;
4680}
4681
4682static ssize_t
4683tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4684 size_t cnt, loff_t *ppos)
4685{
4686 unsigned long val;
4687 int ret;
4688
4689 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4690 if (ret)
4691 return ret;
4692
4693 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4694 if (!val || val > PID_MAX_DEFAULT)
4695 return -EINVAL;
4696
4697 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4698 if (ret < 0)
4699 return ret;
4700
4701 *ppos += cnt;
4702
4703 return cnt;
4704}
4705
4706static const struct file_operations tracing_saved_cmdlines_size_fops = {
4707 .open = tracing_open_generic,
4708 .read = tracing_saved_cmdlines_size_read,
4709 .write = tracing_saved_cmdlines_size_write,
4710};
4711
9828413d
SRRH
4712#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4713static union trace_enum_map_item *
4714update_enum_map(union trace_enum_map_item *ptr)
4715{
4716 if (!ptr->map.enum_string) {
4717 if (ptr->tail.next) {
4718 ptr = ptr->tail.next;
4719 /* Set ptr to the next real item (skip head) */
4720 ptr++;
4721 } else
4722 return NULL;
4723 }
4724 return ptr;
4725}
4726
4727static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4728{
4729 union trace_enum_map_item *ptr = v;
4730
4731 /*
4732 * Paranoid! If ptr points to end, we don't want to increment past it.
4733 * This really should never happen.
4734 */
4735 ptr = update_enum_map(ptr);
4736 if (WARN_ON_ONCE(!ptr))
4737 return NULL;
4738
4739 ptr++;
4740
4741 (*pos)++;
4742
4743 ptr = update_enum_map(ptr);
4744
4745 return ptr;
4746}
4747
4748static void *enum_map_start(struct seq_file *m, loff_t *pos)
4749{
4750 union trace_enum_map_item *v;
4751 loff_t l = 0;
4752
4753 mutex_lock(&trace_enum_mutex);
4754
4755 v = trace_enum_maps;
4756 if (v)
4757 v++;
4758
4759 while (v && l < *pos) {
4760 v = enum_map_next(m, v, &l);
4761 }
4762
4763 return v;
4764}
4765
4766static void enum_map_stop(struct seq_file *m, void *v)
4767{
4768 mutex_unlock(&trace_enum_mutex);
4769}
4770
4771static int enum_map_show(struct seq_file *m, void *v)
4772{
4773 union trace_enum_map_item *ptr = v;
4774
4775 seq_printf(m, "%s %ld (%s)\n",
4776 ptr->map.enum_string, ptr->map.enum_value,
4777 ptr->map.system);
4778
4779 return 0;
4780}
4781
4782static const struct seq_operations tracing_enum_map_seq_ops = {
4783 .start = enum_map_start,
4784 .next = enum_map_next,
4785 .stop = enum_map_stop,
4786 .show = enum_map_show,
4787};
4788
4789static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4790{
4791 if (tracing_disabled)
4792 return -ENODEV;
4793
4794 return seq_open(filp, &tracing_enum_map_seq_ops);
4795}
4796
4797static const struct file_operations tracing_enum_map_fops = {
4798 .open = tracing_enum_map_open,
4799 .read = seq_read,
4800 .llseek = seq_lseek,
4801 .release = seq_release,
4802};
4803
4804static inline union trace_enum_map_item *
4805trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4806{
4807 /* Return tail of array given the head */
4808 return ptr + ptr->head.length + 1;
4809}
4810
4811static void
4812trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4813 int len)
4814{
4815 struct trace_enum_map **stop;
4816 struct trace_enum_map **map;
4817 union trace_enum_map_item *map_array;
4818 union trace_enum_map_item *ptr;
4819
4820 stop = start + len;
4821
4822 /*
4823 * The trace_enum_maps contains the map plus a head and tail item,
4824 * where the head holds the module and length of array, and the
4825 * tail holds a pointer to the next list.
4826 */
4827 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4828 if (!map_array) {
a395d6a7 4829 pr_warn("Unable to allocate trace enum mapping\n");
9828413d
SRRH
4830 return;
4831 }
4832
4833 mutex_lock(&trace_enum_mutex);
4834
4835 if (!trace_enum_maps)
4836 trace_enum_maps = map_array;
4837 else {
4838 ptr = trace_enum_maps;
4839 for (;;) {
4840 ptr = trace_enum_jmp_to_tail(ptr);
4841 if (!ptr->tail.next)
4842 break;
4843 ptr = ptr->tail.next;
4844
4845 }
4846 ptr->tail.next = map_array;
4847 }
4848 map_array->head.mod = mod;
4849 map_array->head.length = len;
4850 map_array++;
4851
4852 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4853 map_array->map = **map;
4854 map_array++;
4855 }
4856 memset(map_array, 0, sizeof(*map_array));
4857
4858 mutex_unlock(&trace_enum_mutex);
4859}
4860
4861static void trace_create_enum_file(struct dentry *d_tracer)
4862{
4863 trace_create_file("enum_map", 0444, d_tracer,
4864 NULL, &tracing_enum_map_fops);
4865}
4866
4867#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4868static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4869static inline void trace_insert_enum_map_file(struct module *mod,
4870 struct trace_enum_map **start, int len) { }
4871#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4872
4873static void trace_insert_enum_map(struct module *mod,
4874 struct trace_enum_map **start, int len)
0c564a53
SRRH
4875{
4876 struct trace_enum_map **map;
0c564a53
SRRH
4877
4878 if (len <= 0)
4879 return;
4880
4881 map = start;
4882
4883 trace_event_enum_update(map, len);
9828413d
SRRH
4884
4885 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4886}
4887
bc0c38d1
SR
4888static ssize_t
4889tracing_set_trace_read(struct file *filp, char __user *ubuf,
4890 size_t cnt, loff_t *ppos)
4891{
2b6080f2 4892 struct trace_array *tr = filp->private_data;
ee6c2c1b 4893 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4894 int r;
4895
4896 mutex_lock(&trace_types_lock);
2b6080f2 4897 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4898 mutex_unlock(&trace_types_lock);
4899
4bf39a94 4900 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4901}
4902
b6f11df2
ACM
4903int tracer_init(struct tracer *t, struct trace_array *tr)
4904{
12883efb 4905 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4906 return t->init(tr);
4907}
4908
12883efb 4909static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4910{
4911 int cpu;
737223fb 4912
438ced17 4913 for_each_tracing_cpu(cpu)
12883efb 4914 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4915}
4916
12883efb 4917#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4918/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4919static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4920 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4921{
4922 int cpu, ret = 0;
4923
4924 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4925 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4926 ret = ring_buffer_resize(trace_buf->buffer,
4927 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4928 if (ret < 0)
4929 break;
12883efb
SRRH
4930 per_cpu_ptr(trace_buf->data, cpu)->entries =
4931 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4932 }
4933 } else {
12883efb
SRRH
4934 ret = ring_buffer_resize(trace_buf->buffer,
4935 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4936 if (ret == 0)
12883efb
SRRH
4937 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4938 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4939 }
4940
4941 return ret;
4942}
12883efb 4943#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4944
2b6080f2
SR
4945static int __tracing_resize_ring_buffer(struct trace_array *tr,
4946 unsigned long size, int cpu)
73c5162a
SR
4947{
4948 int ret;
4949
4950 /*
4951 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4952 * we use the size that was given, and we can forget about
4953 * expanding it later.
73c5162a 4954 */
55034cd6 4955 ring_buffer_expanded = true;
73c5162a 4956
b382ede6 4957 /* May be called before buffers are initialized */
12883efb 4958 if (!tr->trace_buffer.buffer)
b382ede6
SR
4959 return 0;
4960
12883efb 4961 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4962 if (ret < 0)
4963 return ret;
4964
12883efb 4965#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4966 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4967 !tr->current_trace->use_max_tr)
ef710e10
KM
4968 goto out;
4969
12883efb 4970 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4971 if (ret < 0) {
12883efb
SRRH
4972 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4973 &tr->trace_buffer, cpu);
73c5162a 4974 if (r < 0) {
a123c52b
SR
4975 /*
4976 * AARGH! We are left with different
4977 * size max buffer!!!!
4978 * The max buffer is our "snapshot" buffer.
4979 * When a tracer needs a snapshot (one of the
4980 * latency tracers), it swaps the max buffer
4981 * with the saved snap shot. We succeeded to
4982 * update the size of the main buffer, but failed to
4983 * update the size of the max buffer. But when we tried
4984 * to reset the main buffer to the original size, we
4985 * failed there too. This is very unlikely to
4986 * happen, but if it does, warn and kill all
4987 * tracing.
4988 */
73c5162a
SR
4989 WARN_ON(1);
4990 tracing_disabled = 1;
4991 }
4992 return ret;
4993 }
4994
438ced17 4995 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4996 set_buffer_entries(&tr->max_buffer, size);
438ced17 4997 else
12883efb 4998 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4999
ef710e10 5000 out:
12883efb
SRRH
5001#endif /* CONFIG_TRACER_MAX_TRACE */
5002
438ced17 5003 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 5004 set_buffer_entries(&tr->trace_buffer, size);
438ced17 5005 else
12883efb 5006 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
5007
5008 return ret;
5009}
5010
2b6080f2
SR
5011static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5012 unsigned long size, int cpu_id)
4f271a2a 5013{
83f40318 5014 int ret = size;
4f271a2a
VN
5015
5016 mutex_lock(&trace_types_lock);
5017
438ced17
VN
5018 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5019 /* make sure, this cpu is enabled in the mask */
5020 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5021 ret = -EINVAL;
5022 goto out;
5023 }
5024 }
4f271a2a 5025
2b6080f2 5026 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
5027 if (ret < 0)
5028 ret = -ENOMEM;
5029
438ced17 5030out:
4f271a2a
VN
5031 mutex_unlock(&trace_types_lock);
5032
5033 return ret;
5034}
5035
ef710e10 5036
1852fcce
SR
5037/**
5038 * tracing_update_buffers - used by tracing facility to expand ring buffers
5039 *
5040 * To save on memory when the tracing is never used on a system with it
5041 * configured in. The ring buffers are set to a minimum size. But once
5042 * a user starts to use the tracing facility, then they need to grow
5043 * to their default size.
5044 *
5045 * This function is to be called when a tracer is about to be used.
5046 */
5047int tracing_update_buffers(void)
5048{
5049 int ret = 0;
5050
1027fcb2 5051 mutex_lock(&trace_types_lock);
1852fcce 5052 if (!ring_buffer_expanded)
2b6080f2 5053 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 5054 RING_BUFFER_ALL_CPUS);
1027fcb2 5055 mutex_unlock(&trace_types_lock);
1852fcce
SR
5056
5057 return ret;
5058}
5059
577b785f
SR
5060struct trace_option_dentry;
5061
37aea98b 5062static void
2b6080f2 5063create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f 5064
6b450d25
SRRH
5065/*
5066 * Used to clear out the tracer before deletion of an instance.
5067 * Must have trace_types_lock held.
5068 */
5069static void tracing_set_nop(struct trace_array *tr)
5070{
5071 if (tr->current_trace == &nop_trace)
5072 return;
5073
50512ab5 5074 tr->current_trace->enabled--;
6b450d25
SRRH
5075
5076 if (tr->current_trace->reset)
5077 tr->current_trace->reset(tr);
5078
5079 tr->current_trace = &nop_trace;
5080}
5081
41d9c0be 5082static void add_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 5083{
09d23a1d
SRRH
5084 /* Only enable if the directory has been created already. */
5085 if (!tr->dir)
5086 return;
5087
37aea98b 5088 create_trace_option_files(tr, t);
09d23a1d
SRRH
5089}
5090
5091static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5092{
bc0c38d1 5093 struct tracer *t;
12883efb 5094#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5095 bool had_max_tr;
12883efb 5096#endif
d9e54076 5097 int ret = 0;
bc0c38d1 5098
1027fcb2
SR
5099 mutex_lock(&trace_types_lock);
5100
73c5162a 5101 if (!ring_buffer_expanded) {
2b6080f2 5102 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 5103 RING_BUFFER_ALL_CPUS);
73c5162a 5104 if (ret < 0)
59f586db 5105 goto out;
73c5162a
SR
5106 ret = 0;
5107 }
5108
bc0c38d1
SR
5109 for (t = trace_types; t; t = t->next) {
5110 if (strcmp(t->name, buf) == 0)
5111 break;
5112 }
c2931e05
FW
5113 if (!t) {
5114 ret = -EINVAL;
5115 goto out;
5116 }
2b6080f2 5117 if (t == tr->current_trace)
bc0c38d1
SR
5118 goto out;
5119
607e2ea1
SRRH
5120 /* Some tracers are only allowed for the top level buffer */
5121 if (!trace_ok_for_array(t, tr)) {
5122 ret = -EINVAL;
5123 goto out;
5124 }
5125
cf6ab6d9
SRRH
5126 /* If trace pipe files are being read, we can't change the tracer */
5127 if (tr->current_trace->ref) {
5128 ret = -EBUSY;
5129 goto out;
5130 }
5131
9f029e83 5132 trace_branch_disable();
613f04a0 5133
50512ab5 5134 tr->current_trace->enabled--;
613f04a0 5135
2b6080f2
SR
5136 if (tr->current_trace->reset)
5137 tr->current_trace->reset(tr);
34600f0e 5138
12883efb 5139 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 5140 tr->current_trace = &nop_trace;
34600f0e 5141
45ad21ca
SRRH
5142#ifdef CONFIG_TRACER_MAX_TRACE
5143 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
5144
5145 if (had_max_tr && !t->use_max_tr) {
5146 /*
5147 * We need to make sure that the update_max_tr sees that
5148 * current_trace changed to nop_trace to keep it from
5149 * swapping the buffers after we resize it.
5150 * The update_max_tr is called from interrupts disabled
5151 * so a synchronized_sched() is sufficient.
5152 */
5153 synchronize_sched();
3209cff4 5154 free_snapshot(tr);
ef710e10 5155 }
12883efb 5156#endif
12883efb
SRRH
5157
5158#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 5159 if (t->use_max_tr && !had_max_tr) {
3209cff4 5160 ret = alloc_snapshot(tr);
d60da506
HT
5161 if (ret < 0)
5162 goto out;
ef710e10 5163 }
12883efb 5164#endif
577b785f 5165
1c80025a 5166 if (t->init) {
b6f11df2 5167 ret = tracer_init(t, tr);
1c80025a
FW
5168 if (ret)
5169 goto out;
5170 }
bc0c38d1 5171
2b6080f2 5172 tr->current_trace = t;
50512ab5 5173 tr->current_trace->enabled++;
9f029e83 5174 trace_branch_enable(tr);
bc0c38d1
SR
5175 out:
5176 mutex_unlock(&trace_types_lock);
5177
d9e54076
PZ
5178 return ret;
5179}
5180
5181static ssize_t
5182tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5183 size_t cnt, loff_t *ppos)
5184{
607e2ea1 5185 struct trace_array *tr = filp->private_data;
ee6c2c1b 5186 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
5187 int i;
5188 size_t ret;
e6e7a65a
FW
5189 int err;
5190
5191 ret = cnt;
d9e54076 5192
ee6c2c1b
LZ
5193 if (cnt > MAX_TRACER_SIZE)
5194 cnt = MAX_TRACER_SIZE;
d9e54076 5195
4afe6495 5196 if (copy_from_user(buf, ubuf, cnt))
d9e54076
PZ
5197 return -EFAULT;
5198
5199 buf[cnt] = 0;
5200
5201 /* strip ending whitespace. */
5202 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5203 buf[i] = 0;
5204
607e2ea1 5205 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
5206 if (err)
5207 return err;
d9e54076 5208
cf8517cf 5209 *ppos += ret;
bc0c38d1 5210
c2931e05 5211 return ret;
bc0c38d1
SR
5212}
5213
5214static ssize_t
6508fa76
SF
5215tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5216 size_t cnt, loff_t *ppos)
bc0c38d1 5217{
bc0c38d1
SR
5218 char buf[64];
5219 int r;
5220
cffae437 5221 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 5222 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
5223 if (r > sizeof(buf))
5224 r = sizeof(buf);
4bf39a94 5225 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
5226}
5227
5228static ssize_t
6508fa76
SF
5229tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5230 size_t cnt, loff_t *ppos)
bc0c38d1 5231{
5e39841c 5232 unsigned long val;
c6caeeb1 5233 int ret;
bc0c38d1 5234
22fe9b54
PH
5235 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5236 if (ret)
c6caeeb1 5237 return ret;
bc0c38d1
SR
5238
5239 *ptr = val * 1000;
5240
5241 return cnt;
5242}
5243
6508fa76
SF
5244static ssize_t
5245tracing_thresh_read(struct file *filp, char __user *ubuf,
5246 size_t cnt, loff_t *ppos)
5247{
5248 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5249}
5250
5251static ssize_t
5252tracing_thresh_write(struct file *filp, const char __user *ubuf,
5253 size_t cnt, loff_t *ppos)
5254{
5255 struct trace_array *tr = filp->private_data;
5256 int ret;
5257
5258 mutex_lock(&trace_types_lock);
5259 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5260 if (ret < 0)
5261 goto out;
5262
5263 if (tr->current_trace->update_thresh) {
5264 ret = tr->current_trace->update_thresh(tr);
5265 if (ret < 0)
5266 goto out;
5267 }
5268
5269 ret = cnt;
5270out:
5271 mutex_unlock(&trace_types_lock);
5272
5273 return ret;
5274}
5275
f971cc9a 5276#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
e428abbb 5277
6508fa76
SF
5278static ssize_t
5279tracing_max_lat_read(struct file *filp, char __user *ubuf,
5280 size_t cnt, loff_t *ppos)
5281{
5282 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5283}
5284
5285static ssize_t
5286tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5287 size_t cnt, loff_t *ppos)
5288{
5289 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5290}
5291
e428abbb
CG
5292#endif
5293
b3806b43
SR
5294static int tracing_open_pipe(struct inode *inode, struct file *filp)
5295{
15544209 5296 struct trace_array *tr = inode->i_private;
b3806b43 5297 struct trace_iterator *iter;
b04cc6b1 5298 int ret = 0;
b3806b43
SR
5299
5300 if (tracing_disabled)
5301 return -ENODEV;
5302
7b85af63
SRRH
5303 if (trace_array_get(tr) < 0)
5304 return -ENODEV;
5305
b04cc6b1
FW
5306 mutex_lock(&trace_types_lock);
5307
b3806b43
SR
5308 /* create a buffer to store the information to pass to userspace */
5309 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
5310 if (!iter) {
5311 ret = -ENOMEM;
f77d09a3 5312 __trace_array_put(tr);
b04cc6b1
FW
5313 goto out;
5314 }
b3806b43 5315
3a161d99 5316 trace_seq_init(&iter->seq);
d716ff71 5317 iter->trace = tr->current_trace;
d7350c3f 5318
4462344e 5319 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 5320 ret = -ENOMEM;
d7350c3f 5321 goto fail;
4462344e
RR
5322 }
5323
a309720c 5324 /* trace pipe does not show start of buffer */
4462344e 5325 cpumask_setall(iter->started);
a309720c 5326
983f938a 5327 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
112f38a7
SR
5328 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5329
8be0709f 5330 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 5331 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
5332 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5333
15544209
ON
5334 iter->tr = tr;
5335 iter->trace_buffer = &tr->trace_buffer;
5336 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 5337 mutex_init(&iter->mutex);
b3806b43
SR
5338 filp->private_data = iter;
5339
107bad8b
SR
5340 if (iter->trace->pipe_open)
5341 iter->trace->pipe_open(iter);
107bad8b 5342
b444786f 5343 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
5344
5345 tr->current_trace->ref++;
b04cc6b1
FW
5346out:
5347 mutex_unlock(&trace_types_lock);
5348 return ret;
d7350c3f
FW
5349
5350fail:
5351 kfree(iter->trace);
5352 kfree(iter);
7b85af63 5353 __trace_array_put(tr);
d7350c3f
FW
5354 mutex_unlock(&trace_types_lock);
5355 return ret;
b3806b43
SR
5356}
5357
5358static int tracing_release_pipe(struct inode *inode, struct file *file)
5359{
5360 struct trace_iterator *iter = file->private_data;
15544209 5361 struct trace_array *tr = inode->i_private;
b3806b43 5362
b04cc6b1
FW
5363 mutex_lock(&trace_types_lock);
5364
cf6ab6d9
SRRH
5365 tr->current_trace->ref--;
5366
29bf4a5e 5367 if (iter->trace->pipe_close)
c521efd1
SR
5368 iter->trace->pipe_close(iter);
5369
b04cc6b1
FW
5370 mutex_unlock(&trace_types_lock);
5371
4462344e 5372 free_cpumask_var(iter->started);
d7350c3f 5373 mutex_destroy(&iter->mutex);
b3806b43 5374 kfree(iter);
b3806b43 5375
7b85af63
SRRH
5376 trace_array_put(tr);
5377
b3806b43
SR
5378 return 0;
5379}
5380
2a2cc8f7 5381static unsigned int
cc60cdc9 5382trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 5383{
983f938a
SRRH
5384 struct trace_array *tr = iter->tr;
5385
15693458
SRRH
5386 /* Iterators are static, they should be filled or empty */
5387 if (trace_buffer_iter(iter, iter->cpu_file))
5388 return POLLIN | POLLRDNORM;
2a2cc8f7 5389
983f938a 5390 if (tr->trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
5391 /*
5392 * Always select as readable when in blocking mode
5393 */
5394 return POLLIN | POLLRDNORM;
15693458 5395 else
12883efb 5396 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 5397 filp, poll_table);
2a2cc8f7 5398}
2a2cc8f7 5399
cc60cdc9
SR
5400static unsigned int
5401tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5402{
5403 struct trace_iterator *iter = filp->private_data;
5404
5405 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
5406}
5407
d716ff71 5408/* Must be called with iter->mutex held. */
ff98781b 5409static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
5410{
5411 struct trace_iterator *iter = filp->private_data;
8b8b3683 5412 int ret;
b3806b43 5413
b3806b43 5414 while (trace_empty(iter)) {
2dc8f095 5415
107bad8b 5416 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 5417 return -EAGAIN;
107bad8b 5418 }
2dc8f095 5419
b3806b43 5420 /*
250bfd3d 5421 * We block until we read something and tracing is disabled.
b3806b43
SR
5422 * We still block if tracing is disabled, but we have never
5423 * read anything. This allows a user to cat this file, and
5424 * then enable tracing. But after we have read something,
5425 * we give an EOF when tracing is again disabled.
5426 *
5427 * iter->pos will be 0 if we haven't read anything.
5428 */
10246fa3 5429 if (!tracing_is_on() && iter->pos)
b3806b43 5430 break;
f4874261
SRRH
5431
5432 mutex_unlock(&iter->mutex);
5433
e30f53aa 5434 ret = wait_on_pipe(iter, false);
f4874261
SRRH
5435
5436 mutex_lock(&iter->mutex);
5437
8b8b3683
SRRH
5438 if (ret)
5439 return ret;
b3806b43
SR
5440 }
5441
ff98781b
EGM
5442 return 1;
5443}
5444
5445/*
5446 * Consumer reader.
5447 */
5448static ssize_t
5449tracing_read_pipe(struct file *filp, char __user *ubuf,
5450 size_t cnt, loff_t *ppos)
5451{
5452 struct trace_iterator *iter = filp->private_data;
5453 ssize_t sret;
5454
d7350c3f
FW
5455 /*
5456 * Avoid more than one consumer on a single file descriptor
5457 * This is just a matter of traces coherency, the ring buffer itself
5458 * is protected.
5459 */
5460 mutex_lock(&iter->mutex);
1245800c
SRRH
5461
5462 /* return any leftover data */
5463 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5464 if (sret != -EBUSY)
5465 goto out;
5466
5467 trace_seq_init(&iter->seq);
5468
ff98781b
EGM
5469 if (iter->trace->read) {
5470 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5471 if (sret)
5472 goto out;
5473 }
5474
5475waitagain:
5476 sret = tracing_wait_pipe(filp);
5477 if (sret <= 0)
5478 goto out;
5479
b3806b43 5480 /* stop when tracing is finished */
ff98781b
EGM
5481 if (trace_empty(iter)) {
5482 sret = 0;
107bad8b 5483 goto out;
ff98781b 5484 }
b3806b43
SR
5485
5486 if (cnt >= PAGE_SIZE)
5487 cnt = PAGE_SIZE - 1;
5488
53d0aa77 5489 /* reset all but tr, trace, and overruns */
53d0aa77
SR
5490 memset(&iter->seq, 0,
5491 sizeof(struct trace_iterator) -
5492 offsetof(struct trace_iterator, seq));
ed5467da 5493 cpumask_clear(iter->started);
4823ed7e 5494 iter->pos = -1;
b3806b43 5495
4f535968 5496 trace_event_read_lock();
7e53bd42 5497 trace_access_lock(iter->cpu_file);
955b61e5 5498 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 5499 enum print_line_t ret;
5ac48378 5500 int save_len = iter->seq.seq.len;
088b1e42 5501
f9896bf3 5502 ret = print_trace_line(iter);
2c4f035f 5503 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 5504 /* don't print partial lines */
5ac48378 5505 iter->seq.seq.len = save_len;
b3806b43 5506 break;
088b1e42 5507 }
b91facc3
FW
5508 if (ret != TRACE_TYPE_NO_CONSUME)
5509 trace_consume(iter);
b3806b43 5510
5ac48378 5511 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 5512 break;
ee5e51f5
JO
5513
5514 /*
5515 * Setting the full flag means we reached the trace_seq buffer
5516 * size and we should leave by partial output condition above.
5517 * One of the trace_seq_* functions is not used properly.
5518 */
5519 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5520 iter->ent->type);
b3806b43 5521 }
7e53bd42 5522 trace_access_unlock(iter->cpu_file);
4f535968 5523 trace_event_read_unlock();
b3806b43 5524
b3806b43 5525 /* Now copy what we have to the user */
6c6c2796 5526 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 5527 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 5528 trace_seq_init(&iter->seq);
9ff4b974
PP
5529
5530 /*
25985edc 5531 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
5532 * entries, go back to wait for more entries.
5533 */
6c6c2796 5534 if (sret == -EBUSY)
9ff4b974 5535 goto waitagain;
b3806b43 5536
107bad8b 5537out:
d7350c3f 5538 mutex_unlock(&iter->mutex);
107bad8b 5539
6c6c2796 5540 return sret;
b3806b43
SR
5541}
5542
3c56819b
EGM
5543static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5544 unsigned int idx)
5545{
5546 __free_page(spd->pages[idx]);
5547}
5548
28dfef8f 5549static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 5550 .can_merge = 0,
34cd4998 5551 .confirm = generic_pipe_buf_confirm,
92fdd98c 5552 .release = generic_pipe_buf_release,
34cd4998
SR
5553 .steal = generic_pipe_buf_steal,
5554 .get = generic_pipe_buf_get,
3c56819b
EGM
5555};
5556
34cd4998 5557static size_t
fa7c7f6e 5558tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
5559{
5560 size_t count;
74f06bb7 5561 int save_len;
34cd4998
SR
5562 int ret;
5563
5564 /* Seq buffer is page-sized, exactly what we need. */
5565 for (;;) {
74f06bb7 5566 save_len = iter->seq.seq.len;
34cd4998 5567 ret = print_trace_line(iter);
74f06bb7
SRRH
5568
5569 if (trace_seq_has_overflowed(&iter->seq)) {
5570 iter->seq.seq.len = save_len;
34cd4998
SR
5571 break;
5572 }
74f06bb7
SRRH
5573
5574 /*
5575 * This should not be hit, because it should only
5576 * be set if the iter->seq overflowed. But check it
5577 * anyway to be safe.
5578 */
34cd4998 5579 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
5580 iter->seq.seq.len = save_len;
5581 break;
5582 }
5583
5ac48378 5584 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
5585 if (rem < count) {
5586 rem = 0;
5587 iter->seq.seq.len = save_len;
34cd4998
SR
5588 break;
5589 }
5590
74e7ff8c
LJ
5591 if (ret != TRACE_TYPE_NO_CONSUME)
5592 trace_consume(iter);
34cd4998 5593 rem -= count;
955b61e5 5594 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
5595 rem = 0;
5596 iter->ent = NULL;
5597 break;
5598 }
5599 }
5600
5601 return rem;
5602}
5603
3c56819b
EGM
5604static ssize_t tracing_splice_read_pipe(struct file *filp,
5605 loff_t *ppos,
5606 struct pipe_inode_info *pipe,
5607 size_t len,
5608 unsigned int flags)
5609{
35f3d14d
JA
5610 struct page *pages_def[PIPE_DEF_BUFFERS];
5611 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
5612 struct trace_iterator *iter = filp->private_data;
5613 struct splice_pipe_desc spd = {
35f3d14d
JA
5614 .pages = pages_def,
5615 .partial = partial_def,
34cd4998 5616 .nr_pages = 0, /* This gets updated below. */
047fe360 5617 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
5618 .flags = flags,
5619 .ops = &tracing_pipe_buf_ops,
5620 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
5621 };
5622 ssize_t ret;
34cd4998 5623 size_t rem;
3c56819b
EGM
5624 unsigned int i;
5625
35f3d14d
JA
5626 if (splice_grow_spd(pipe, &spd))
5627 return -ENOMEM;
5628
d7350c3f 5629 mutex_lock(&iter->mutex);
3c56819b
EGM
5630
5631 if (iter->trace->splice_read) {
5632 ret = iter->trace->splice_read(iter, filp,
5633 ppos, pipe, len, flags);
5634 if (ret)
34cd4998 5635 goto out_err;
3c56819b
EGM
5636 }
5637
5638 ret = tracing_wait_pipe(filp);
5639 if (ret <= 0)
34cd4998 5640 goto out_err;
3c56819b 5641
955b61e5 5642 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 5643 ret = -EFAULT;
34cd4998 5644 goto out_err;
3c56819b
EGM
5645 }
5646
4f535968 5647 trace_event_read_lock();
7e53bd42 5648 trace_access_lock(iter->cpu_file);
4f535968 5649
3c56819b 5650 /* Fill as many pages as possible. */
a786c06d 5651 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
5652 spd.pages[i] = alloc_page(GFP_KERNEL);
5653 if (!spd.pages[i])
34cd4998 5654 break;
3c56819b 5655
fa7c7f6e 5656 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
5657
5658 /* Copy the data into the page, so we can start over. */
5659 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 5660 page_address(spd.pages[i]),
5ac48378 5661 trace_seq_used(&iter->seq));
3c56819b 5662 if (ret < 0) {
35f3d14d 5663 __free_page(spd.pages[i]);
3c56819b
EGM
5664 break;
5665 }
35f3d14d 5666 spd.partial[i].offset = 0;
5ac48378 5667 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 5668
f9520750 5669 trace_seq_init(&iter->seq);
3c56819b
EGM
5670 }
5671
7e53bd42 5672 trace_access_unlock(iter->cpu_file);
4f535968 5673 trace_event_read_unlock();
d7350c3f 5674 mutex_unlock(&iter->mutex);
3c56819b
EGM
5675
5676 spd.nr_pages = i;
5677
a29054d9
SRRH
5678 if (i)
5679 ret = splice_to_pipe(pipe, &spd);
5680 else
5681 ret = 0;
35f3d14d 5682out:
047fe360 5683 splice_shrink_spd(&spd);
35f3d14d 5684 return ret;
3c56819b 5685
34cd4998 5686out_err:
d7350c3f 5687 mutex_unlock(&iter->mutex);
35f3d14d 5688 goto out;
3c56819b
EGM
5689}
5690
a98a3c3f
SR
5691static ssize_t
5692tracing_entries_read(struct file *filp, char __user *ubuf,
5693 size_t cnt, loff_t *ppos)
5694{
0bc392ee
ON
5695 struct inode *inode = file_inode(filp);
5696 struct trace_array *tr = inode->i_private;
5697 int cpu = tracing_get_cpu(inode);
438ced17
VN
5698 char buf[64];
5699 int r = 0;
5700 ssize_t ret;
a98a3c3f 5701
db526ca3 5702 mutex_lock(&trace_types_lock);
438ced17 5703
0bc392ee 5704 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
5705 int cpu, buf_size_same;
5706 unsigned long size;
5707
5708 size = 0;
5709 buf_size_same = 1;
5710 /* check if all cpu sizes are same */
5711 for_each_tracing_cpu(cpu) {
5712 /* fill in the size from first enabled cpu */
5713 if (size == 0)
12883efb
SRRH
5714 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5715 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
5716 buf_size_same = 0;
5717 break;
5718 }
5719 }
5720
5721 if (buf_size_same) {
5722 if (!ring_buffer_expanded)
5723 r = sprintf(buf, "%lu (expanded: %lu)\n",
5724 size >> 10,
5725 trace_buf_size >> 10);
5726 else
5727 r = sprintf(buf, "%lu\n", size >> 10);
5728 } else
5729 r = sprintf(buf, "X\n");
5730 } else
0bc392ee 5731 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 5732
db526ca3
SR
5733 mutex_unlock(&trace_types_lock);
5734
438ced17
VN
5735 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5736 return ret;
a98a3c3f
SR
5737}
5738
5739static ssize_t
5740tracing_entries_write(struct file *filp, const char __user *ubuf,
5741 size_t cnt, loff_t *ppos)
5742{
0bc392ee
ON
5743 struct inode *inode = file_inode(filp);
5744 struct trace_array *tr = inode->i_private;
a98a3c3f 5745 unsigned long val;
4f271a2a 5746 int ret;
a98a3c3f 5747
22fe9b54
PH
5748 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5749 if (ret)
c6caeeb1 5750 return ret;
a98a3c3f
SR
5751
5752 /* must have at least 1 entry */
5753 if (!val)
5754 return -EINVAL;
5755
1696b2b0
SR
5756 /* value is in KB */
5757 val <<= 10;
0bc392ee 5758 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
5759 if (ret < 0)
5760 return ret;
a98a3c3f 5761
cf8517cf 5762 *ppos += cnt;
a98a3c3f 5763
4f271a2a
VN
5764 return cnt;
5765}
bf5e6519 5766
f81ab074
VN
5767static ssize_t
5768tracing_total_entries_read(struct file *filp, char __user *ubuf,
5769 size_t cnt, loff_t *ppos)
5770{
5771 struct trace_array *tr = filp->private_data;
5772 char buf[64];
5773 int r, cpu;
5774 unsigned long size = 0, expanded_size = 0;
5775
5776 mutex_lock(&trace_types_lock);
5777 for_each_tracing_cpu(cpu) {
12883efb 5778 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5779 if (!ring_buffer_expanded)
5780 expanded_size += trace_buf_size >> 10;
5781 }
5782 if (ring_buffer_expanded)
5783 r = sprintf(buf, "%lu\n", size);
5784 else
5785 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5786 mutex_unlock(&trace_types_lock);
5787
5788 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5789}
5790
4f271a2a
VN
5791static ssize_t
5792tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5793 size_t cnt, loff_t *ppos)
5794{
5795 /*
5796 * There is no need to read what the user has written, this function
5797 * is just to make sure that there is no error when "echo" is used
5798 */
5799
5800 *ppos += cnt;
a98a3c3f
SR
5801
5802 return cnt;
5803}
5804
4f271a2a
VN
5805static int
5806tracing_free_buffer_release(struct inode *inode, struct file *filp)
5807{
2b6080f2
SR
5808 struct trace_array *tr = inode->i_private;
5809
cf30cf67 5810 /* disable tracing ? */
983f938a 5811 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5812 tracer_tracing_off(tr);
4f271a2a 5813 /* resize the ring buffer to 0 */
2b6080f2 5814 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5815
7b85af63
SRRH
5816 trace_array_put(tr);
5817
4f271a2a
VN
5818 return 0;
5819}
5820
5bf9a1ee
PP
5821static ssize_t
5822tracing_mark_write(struct file *filp, const char __user *ubuf,
5823 size_t cnt, loff_t *fpos)
5824{
2d71619c 5825 struct trace_array *tr = filp->private_data;
d696b58c
SR
5826 struct ring_buffer_event *event;
5827 struct ring_buffer *buffer;
5828 struct print_entry *entry;
5829 unsigned long irq_flags;
656c7f0d 5830 const char faulted[] = "<faulted>";
d696b58c 5831 ssize_t written;
d696b58c
SR
5832 int size;
5833 int len;
fa32e855 5834
656c7f0d
SRRH
5835/* Used in tracing_mark_raw_write() as well */
5836#define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
5bf9a1ee 5837
c76f0694 5838 if (tracing_disabled)
5bf9a1ee
PP
5839 return -EINVAL;
5840
983f938a 5841 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5224c3a3
MSB
5842 return -EINVAL;
5843
5bf9a1ee
PP
5844 if (cnt > TRACE_BUF_SIZE)
5845 cnt = TRACE_BUF_SIZE;
5846
d696b58c 5847 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5848
d696b58c 5849 local_save_flags(irq_flags);
656c7f0d 5850 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
d696b58c 5851
656c7f0d
SRRH
5852 /* If less than "<faulted>", then make sure we can still add that */
5853 if (cnt < FAULTED_SIZE)
5854 size += FAULTED_SIZE - cnt;
d696b58c 5855
2d71619c 5856 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
5857 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5858 irq_flags, preempt_count());
656c7f0d 5859 if (unlikely(!event))
d696b58c 5860 /* Ring buffer disabled, return as if not open for write */
656c7f0d 5861 return -EBADF;
d696b58c
SR
5862
5863 entry = ring_buffer_event_data(event);
5864 entry->ip = _THIS_IP_;
5865
656c7f0d
SRRH
5866 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
5867 if (len) {
5868 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5869 cnt = FAULTED_SIZE;
5870 written = -EFAULT;
c13d2f7c 5871 } else
656c7f0d
SRRH
5872 written = cnt;
5873 len = cnt;
5bf9a1ee 5874
d696b58c
SR
5875 if (entry->buf[cnt - 1] != '\n') {
5876 entry->buf[cnt] = '\n';
5877 entry->buf[cnt + 1] = '\0';
5878 } else
5879 entry->buf[cnt] = '\0';
5880
7ffbd48d 5881 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5882
656c7f0d
SRRH
5883 if (written > 0)
5884 *fpos += written;
5bf9a1ee 5885
fa32e855
SR
5886 return written;
5887}
5888
5889/* Limit it for now to 3K (including tag) */
5890#define RAW_DATA_MAX_SIZE (1024*3)
5891
5892static ssize_t
5893tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
5894 size_t cnt, loff_t *fpos)
5895{
5896 struct trace_array *tr = filp->private_data;
5897 struct ring_buffer_event *event;
5898 struct ring_buffer *buffer;
5899 struct raw_data_entry *entry;
656c7f0d 5900 const char faulted[] = "<faulted>";
fa32e855 5901 unsigned long irq_flags;
fa32e855 5902 ssize_t written;
fa32e855
SR
5903 int size;
5904 int len;
5905
656c7f0d
SRRH
5906#define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
5907
fa32e855
SR
5908 if (tracing_disabled)
5909 return -EINVAL;
5910
5911 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
5912 return -EINVAL;
5913
5914 /* The marker must at least have a tag id */
5915 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
5916 return -EINVAL;
5917
5918 if (cnt > TRACE_BUF_SIZE)
5919 cnt = TRACE_BUF_SIZE;
5920
5921 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5922
fa32e855
SR
5923 local_save_flags(irq_flags);
5924 size = sizeof(*entry) + cnt;
656c7f0d
SRRH
5925 if (cnt < FAULT_SIZE_ID)
5926 size += FAULT_SIZE_ID - cnt;
5927
fa32e855 5928 buffer = tr->trace_buffer.buffer;
3e9a8aad
SRRH
5929 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
5930 irq_flags, preempt_count());
656c7f0d 5931 if (!event)
fa32e855 5932 /* Ring buffer disabled, return as if not open for write */
656c7f0d 5933 return -EBADF;
fa32e855
SR
5934
5935 entry = ring_buffer_event_data(event);
5936
656c7f0d
SRRH
5937 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
5938 if (len) {
5939 entry->id = -1;
5940 memcpy(&entry->buf, faulted, FAULTED_SIZE);
5941 written = -EFAULT;
fa32e855 5942 } else
656c7f0d 5943 written = cnt;
fa32e855
SR
5944
5945 __buffer_unlock_commit(buffer, event);
5946
656c7f0d
SRRH
5947 if (written > 0)
5948 *fpos += written;
1aa54bca
MS
5949
5950 return written;
5bf9a1ee
PP
5951}
5952
13f16d20 5953static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5954{
2b6080f2 5955 struct trace_array *tr = m->private;
5079f326
Z
5956 int i;
5957
5958 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5959 seq_printf(m,
5079f326 5960 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5961 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5962 i == tr->clock_id ? "]" : "");
13f16d20 5963 seq_putc(m, '\n');
5079f326 5964
13f16d20 5965 return 0;
5079f326
Z
5966}
5967
e1e232ca 5968static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5969{
5079f326
Z
5970 int i;
5971
5079f326
Z
5972 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5973 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5974 break;
5975 }
5976 if (i == ARRAY_SIZE(trace_clocks))
5977 return -EINVAL;
5978
5079f326
Z
5979 mutex_lock(&trace_types_lock);
5980
2b6080f2
SR
5981 tr->clock_id = i;
5982
12883efb 5983 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5984
60303ed3
DS
5985 /*
5986 * New clock may not be consistent with the previous clock.
5987 * Reset the buffer so that it doesn't have incomparable timestamps.
5988 */
9457158b 5989 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5990
5991#ifdef CONFIG_TRACER_MAX_TRACE
5992 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5993 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5994 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5995#endif
60303ed3 5996
5079f326
Z
5997 mutex_unlock(&trace_types_lock);
5998
e1e232ca
SR
5999 return 0;
6000}
6001
6002static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6003 size_t cnt, loff_t *fpos)
6004{
6005 struct seq_file *m = filp->private_data;
6006 struct trace_array *tr = m->private;
6007 char buf[64];
6008 const char *clockstr;
6009 int ret;
6010
6011 if (cnt >= sizeof(buf))
6012 return -EINVAL;
6013
4afe6495 6014 if (copy_from_user(buf, ubuf, cnt))
e1e232ca
SR
6015 return -EFAULT;
6016
6017 buf[cnt] = 0;
6018
6019 clockstr = strstrip(buf);
6020
6021 ret = tracing_set_clock(tr, clockstr);
6022 if (ret)
6023 return ret;
6024
5079f326
Z
6025 *fpos += cnt;
6026
6027 return cnt;
6028}
6029
13f16d20
LZ
6030static int tracing_clock_open(struct inode *inode, struct file *file)
6031{
7b85af63
SRRH
6032 struct trace_array *tr = inode->i_private;
6033 int ret;
6034
13f16d20
LZ
6035 if (tracing_disabled)
6036 return -ENODEV;
2b6080f2 6037
7b85af63
SRRH
6038 if (trace_array_get(tr))
6039 return -ENODEV;
6040
6041 ret = single_open(file, tracing_clock_show, inode->i_private);
6042 if (ret < 0)
6043 trace_array_put(tr);
6044
6045 return ret;
13f16d20
LZ
6046}
6047
6de58e62
SRRH
6048struct ftrace_buffer_info {
6049 struct trace_iterator iter;
6050 void *spare;
6051 unsigned int read;
6052};
6053
debdd57f
HT
6054#ifdef CONFIG_TRACER_SNAPSHOT
6055static int tracing_snapshot_open(struct inode *inode, struct file *file)
6056{
6484c71c 6057 struct trace_array *tr = inode->i_private;
debdd57f 6058 struct trace_iterator *iter;
2b6080f2 6059 struct seq_file *m;
debdd57f
HT
6060 int ret = 0;
6061
ff451961
SRRH
6062 if (trace_array_get(tr) < 0)
6063 return -ENODEV;
6064
debdd57f 6065 if (file->f_mode & FMODE_READ) {
6484c71c 6066 iter = __tracing_open(inode, file, true);
debdd57f
HT
6067 if (IS_ERR(iter))
6068 ret = PTR_ERR(iter);
2b6080f2
SR
6069 } else {
6070 /* Writes still need the seq_file to hold the private data */
f77d09a3 6071 ret = -ENOMEM;
2b6080f2
SR
6072 m = kzalloc(sizeof(*m), GFP_KERNEL);
6073 if (!m)
f77d09a3 6074 goto out;
2b6080f2
SR
6075 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6076 if (!iter) {
6077 kfree(m);
f77d09a3 6078 goto out;
2b6080f2 6079 }
f77d09a3
AL
6080 ret = 0;
6081
ff451961 6082 iter->tr = tr;
6484c71c
ON
6083 iter->trace_buffer = &tr->max_buffer;
6084 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
6085 m->private = iter;
6086 file->private_data = m;
debdd57f 6087 }
f77d09a3 6088out:
ff451961
SRRH
6089 if (ret < 0)
6090 trace_array_put(tr);
6091
debdd57f
HT
6092 return ret;
6093}
6094
6095static ssize_t
6096tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6097 loff_t *ppos)
6098{
2b6080f2
SR
6099 struct seq_file *m = filp->private_data;
6100 struct trace_iterator *iter = m->private;
6101 struct trace_array *tr = iter->tr;
debdd57f
HT
6102 unsigned long val;
6103 int ret;
6104
6105 ret = tracing_update_buffers();
6106 if (ret < 0)
6107 return ret;
6108
6109 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6110 if (ret)
6111 return ret;
6112
6113 mutex_lock(&trace_types_lock);
6114
2b6080f2 6115 if (tr->current_trace->use_max_tr) {
debdd57f
HT
6116 ret = -EBUSY;
6117 goto out;
6118 }
6119
6120 switch (val) {
6121 case 0:
f1affcaa
SRRH
6122 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6123 ret = -EINVAL;
6124 break;
debdd57f 6125 }
3209cff4
SRRH
6126 if (tr->allocated_snapshot)
6127 free_snapshot(tr);
debdd57f
HT
6128 break;
6129 case 1:
f1affcaa
SRRH
6130/* Only allow per-cpu swap if the ring buffer supports it */
6131#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6132 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6133 ret = -EINVAL;
6134 break;
6135 }
6136#endif
45ad21ca 6137 if (!tr->allocated_snapshot) {
3209cff4 6138 ret = alloc_snapshot(tr);
debdd57f
HT
6139 if (ret < 0)
6140 break;
debdd57f 6141 }
debdd57f
HT
6142 local_irq_disable();
6143 /* Now, we're going to swap */
f1affcaa 6144 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 6145 update_max_tr(tr, current, smp_processor_id());
f1affcaa 6146 else
ce9bae55 6147 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
6148 local_irq_enable();
6149 break;
6150 default:
45ad21ca 6151 if (tr->allocated_snapshot) {
f1affcaa
SRRH
6152 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6153 tracing_reset_online_cpus(&tr->max_buffer);
6154 else
6155 tracing_reset(&tr->max_buffer, iter->cpu_file);
6156 }
debdd57f
HT
6157 break;
6158 }
6159
6160 if (ret >= 0) {
6161 *ppos += cnt;
6162 ret = cnt;
6163 }
6164out:
6165 mutex_unlock(&trace_types_lock);
6166 return ret;
6167}
2b6080f2
SR
6168
6169static int tracing_snapshot_release(struct inode *inode, struct file *file)
6170{
6171 struct seq_file *m = file->private_data;
ff451961
SRRH
6172 int ret;
6173
6174 ret = tracing_release(inode, file);
2b6080f2
SR
6175
6176 if (file->f_mode & FMODE_READ)
ff451961 6177 return ret;
2b6080f2
SR
6178
6179 /* If write only, the seq_file is just a stub */
6180 if (m)
6181 kfree(m->private);
6182 kfree(m);
6183
6184 return 0;
6185}
6186
6de58e62
SRRH
6187static int tracing_buffers_open(struct inode *inode, struct file *filp);
6188static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6189 size_t count, loff_t *ppos);
6190static int tracing_buffers_release(struct inode *inode, struct file *file);
6191static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6192 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6193
6194static int snapshot_raw_open(struct inode *inode, struct file *filp)
6195{
6196 struct ftrace_buffer_info *info;
6197 int ret;
6198
6199 ret = tracing_buffers_open(inode, filp);
6200 if (ret < 0)
6201 return ret;
6202
6203 info = filp->private_data;
6204
6205 if (info->iter.trace->use_max_tr) {
6206 tracing_buffers_release(inode, filp);
6207 return -EBUSY;
6208 }
6209
6210 info->iter.snapshot = true;
6211 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6212
6213 return ret;
6214}
6215
debdd57f
HT
6216#endif /* CONFIG_TRACER_SNAPSHOT */
6217
6218
6508fa76
SF
6219static const struct file_operations tracing_thresh_fops = {
6220 .open = tracing_open_generic,
6221 .read = tracing_thresh_read,
6222 .write = tracing_thresh_write,
6223 .llseek = generic_file_llseek,
6224};
6225
f971cc9a 6226#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5e2336a0 6227static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
6228 .open = tracing_open_generic,
6229 .read = tracing_max_lat_read,
6230 .write = tracing_max_lat_write,
b444786f 6231 .llseek = generic_file_llseek,
bc0c38d1 6232};
e428abbb 6233#endif
bc0c38d1 6234
5e2336a0 6235static const struct file_operations set_tracer_fops = {
4bf39a94
IM
6236 .open = tracing_open_generic,
6237 .read = tracing_set_trace_read,
6238 .write = tracing_set_trace_write,
b444786f 6239 .llseek = generic_file_llseek,
bc0c38d1
SR
6240};
6241
5e2336a0 6242static const struct file_operations tracing_pipe_fops = {
4bf39a94 6243 .open = tracing_open_pipe,
2a2cc8f7 6244 .poll = tracing_poll_pipe,
4bf39a94 6245 .read = tracing_read_pipe,
3c56819b 6246 .splice_read = tracing_splice_read_pipe,
4bf39a94 6247 .release = tracing_release_pipe,
b444786f 6248 .llseek = no_llseek,
b3806b43
SR
6249};
6250
5e2336a0 6251static const struct file_operations tracing_entries_fops = {
0bc392ee 6252 .open = tracing_open_generic_tr,
a98a3c3f
SR
6253 .read = tracing_entries_read,
6254 .write = tracing_entries_write,
b444786f 6255 .llseek = generic_file_llseek,
0bc392ee 6256 .release = tracing_release_generic_tr,
a98a3c3f
SR
6257};
6258
f81ab074 6259static const struct file_operations tracing_total_entries_fops = {
7b85af63 6260 .open = tracing_open_generic_tr,
f81ab074
VN
6261 .read = tracing_total_entries_read,
6262 .llseek = generic_file_llseek,
7b85af63 6263 .release = tracing_release_generic_tr,
f81ab074
VN
6264};
6265
4f271a2a 6266static const struct file_operations tracing_free_buffer_fops = {
7b85af63 6267 .open = tracing_open_generic_tr,
4f271a2a
VN
6268 .write = tracing_free_buffer_write,
6269 .release = tracing_free_buffer_release,
6270};
6271
5e2336a0 6272static const struct file_operations tracing_mark_fops = {
7b85af63 6273 .open = tracing_open_generic_tr,
5bf9a1ee 6274 .write = tracing_mark_write,
b444786f 6275 .llseek = generic_file_llseek,
7b85af63 6276 .release = tracing_release_generic_tr,
5bf9a1ee
PP
6277};
6278
fa32e855
SR
6279static const struct file_operations tracing_mark_raw_fops = {
6280 .open = tracing_open_generic_tr,
6281 .write = tracing_mark_raw_write,
6282 .llseek = generic_file_llseek,
6283 .release = tracing_release_generic_tr,
6284};
6285
5079f326 6286static const struct file_operations trace_clock_fops = {
13f16d20
LZ
6287 .open = tracing_clock_open,
6288 .read = seq_read,
6289 .llseek = seq_lseek,
7b85af63 6290 .release = tracing_single_release_tr,
5079f326
Z
6291 .write = tracing_clock_write,
6292};
6293
debdd57f
HT
6294#ifdef CONFIG_TRACER_SNAPSHOT
6295static const struct file_operations snapshot_fops = {
6296 .open = tracing_snapshot_open,
6297 .read = seq_read,
6298 .write = tracing_snapshot_write,
098c879e 6299 .llseek = tracing_lseek,
2b6080f2 6300 .release = tracing_snapshot_release,
debdd57f 6301};
debdd57f 6302
6de58e62
SRRH
6303static const struct file_operations snapshot_raw_fops = {
6304 .open = snapshot_raw_open,
6305 .read = tracing_buffers_read,
6306 .release = tracing_buffers_release,
6307 .splice_read = tracing_buffers_splice_read,
6308 .llseek = no_llseek,
2cadf913
SR
6309};
6310
6de58e62
SRRH
6311#endif /* CONFIG_TRACER_SNAPSHOT */
6312
2cadf913
SR
6313static int tracing_buffers_open(struct inode *inode, struct file *filp)
6314{
46ef2be0 6315 struct trace_array *tr = inode->i_private;
2cadf913 6316 struct ftrace_buffer_info *info;
7b85af63 6317 int ret;
2cadf913
SR
6318
6319 if (tracing_disabled)
6320 return -ENODEV;
6321
7b85af63
SRRH
6322 if (trace_array_get(tr) < 0)
6323 return -ENODEV;
6324
2cadf913 6325 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
6326 if (!info) {
6327 trace_array_put(tr);
2cadf913 6328 return -ENOMEM;
7b85af63 6329 }
2cadf913 6330
a695cb58
SRRH
6331 mutex_lock(&trace_types_lock);
6332
cc60cdc9 6333 info->iter.tr = tr;
46ef2be0 6334 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 6335 info->iter.trace = tr->current_trace;
12883efb 6336 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 6337 info->spare = NULL;
2cadf913 6338 /* Force reading ring buffer for first read */
cc60cdc9 6339 info->read = (unsigned int)-1;
2cadf913
SR
6340
6341 filp->private_data = info;
6342
cf6ab6d9
SRRH
6343 tr->current_trace->ref++;
6344
a695cb58
SRRH
6345 mutex_unlock(&trace_types_lock);
6346
7b85af63
SRRH
6347 ret = nonseekable_open(inode, filp);
6348 if (ret < 0)
6349 trace_array_put(tr);
6350
6351 return ret;
2cadf913
SR
6352}
6353
cc60cdc9
SR
6354static unsigned int
6355tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6356{
6357 struct ftrace_buffer_info *info = filp->private_data;
6358 struct trace_iterator *iter = &info->iter;
6359
6360 return trace_poll(iter, filp, poll_table);
6361}
6362
2cadf913
SR
6363static ssize_t
6364tracing_buffers_read(struct file *filp, char __user *ubuf,
6365 size_t count, loff_t *ppos)
6366{
6367 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 6368 struct trace_iterator *iter = &info->iter;
2cadf913 6369 ssize_t ret;
6de58e62 6370 ssize_t size;
2cadf913 6371
2dc5d12b
SR
6372 if (!count)
6373 return 0;
6374
6de58e62 6375#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6376 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6377 return -EBUSY;
6de58e62
SRRH
6378#endif
6379
ddd538f3 6380 if (!info->spare)
12883efb
SRRH
6381 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6382 iter->cpu_file);
ddd538f3 6383 if (!info->spare)
d716ff71 6384 return -ENOMEM;
ddd538f3 6385
2cadf913
SR
6386 /* Do we have previous read data to read? */
6387 if (info->read < PAGE_SIZE)
6388 goto read;
6389
b627344f 6390 again:
cc60cdc9 6391 trace_access_lock(iter->cpu_file);
12883efb 6392 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
6393 &info->spare,
6394 count,
cc60cdc9
SR
6395 iter->cpu_file, 0);
6396 trace_access_unlock(iter->cpu_file);
2cadf913 6397
b627344f
SR
6398 if (ret < 0) {
6399 if (trace_empty(iter)) {
d716ff71
SRRH
6400 if ((filp->f_flags & O_NONBLOCK))
6401 return -EAGAIN;
6402
e30f53aa 6403 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
6404 if (ret)
6405 return ret;
6406
b627344f
SR
6407 goto again;
6408 }
d716ff71 6409 return 0;
b627344f 6410 }
436fc280 6411
436fc280 6412 info->read = 0;
b627344f 6413 read:
2cadf913
SR
6414 size = PAGE_SIZE - info->read;
6415 if (size > count)
6416 size = count;
6417
6418 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
6419 if (ret == size)
6420 return -EFAULT;
6421
2dc5d12b
SR
6422 size -= ret;
6423
2cadf913
SR
6424 *ppos += size;
6425 info->read += size;
6426
6427 return size;
6428}
6429
6430static int tracing_buffers_release(struct inode *inode, struct file *file)
6431{
6432 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6433 struct trace_iterator *iter = &info->iter;
2cadf913 6434
a695cb58
SRRH
6435 mutex_lock(&trace_types_lock);
6436
cf6ab6d9
SRRH
6437 iter->tr->current_trace->ref--;
6438
ff451961 6439 __trace_array_put(iter->tr);
2cadf913 6440
ddd538f3 6441 if (info->spare)
12883efb 6442 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
6443 kfree(info);
6444
a695cb58
SRRH
6445 mutex_unlock(&trace_types_lock);
6446
2cadf913
SR
6447 return 0;
6448}
6449
6450struct buffer_ref {
6451 struct ring_buffer *buffer;
6452 void *page;
6453 int ref;
6454};
6455
6456static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6457 struct pipe_buffer *buf)
6458{
6459 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6460
6461 if (--ref->ref)
6462 return;
6463
6464 ring_buffer_free_read_page(ref->buffer, ref->page);
6465 kfree(ref);
6466 buf->private = 0;
6467}
6468
2cadf913
SR
6469static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6470 struct pipe_buffer *buf)
6471{
6472 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6473
6474 ref->ref++;
6475}
6476
6477/* Pipe buffer operations for a buffer. */
28dfef8f 6478static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 6479 .can_merge = 0,
2cadf913
SR
6480 .confirm = generic_pipe_buf_confirm,
6481 .release = buffer_pipe_buf_release,
d55cb6cf 6482 .steal = generic_pipe_buf_steal,
2cadf913
SR
6483 .get = buffer_pipe_buf_get,
6484};
6485
6486/*
6487 * Callback from splice_to_pipe(), if we need to release some pages
6488 * at the end of the spd in case we error'ed out in filling the pipe.
6489 */
6490static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6491{
6492 struct buffer_ref *ref =
6493 (struct buffer_ref *)spd->partial[i].private;
6494
6495 if (--ref->ref)
6496 return;
6497
6498 ring_buffer_free_read_page(ref->buffer, ref->page);
6499 kfree(ref);
6500 spd->partial[i].private = 0;
6501}
6502
6503static ssize_t
6504tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6505 struct pipe_inode_info *pipe, size_t len,
6506 unsigned int flags)
6507{
6508 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 6509 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
6510 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6511 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 6512 struct splice_pipe_desc spd = {
35f3d14d
JA
6513 .pages = pages_def,
6514 .partial = partial_def,
047fe360 6515 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
6516 .flags = flags,
6517 .ops = &buffer_pipe_buf_ops,
6518 .spd_release = buffer_spd_release,
6519 };
6520 struct buffer_ref *ref;
93459c6c 6521 int entries, size, i;
07906da7 6522 ssize_t ret = 0;
2cadf913 6523
6de58e62 6524#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
6525 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6526 return -EBUSY;
6de58e62
SRRH
6527#endif
6528
d716ff71
SRRH
6529 if (*ppos & (PAGE_SIZE - 1))
6530 return -EINVAL;
93cfb3c9
LJ
6531
6532 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
6533 if (len < PAGE_SIZE)
6534 return -EINVAL;
93cfb3c9
LJ
6535 len &= PAGE_MASK;
6536 }
6537
1ae2293d
AV
6538 if (splice_grow_spd(pipe, &spd))
6539 return -ENOMEM;
6540
cc60cdc9
SR
6541 again:
6542 trace_access_lock(iter->cpu_file);
12883efb 6543 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 6544
a786c06d 6545 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
6546 struct page *page;
6547 int r;
6548
6549 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
6550 if (!ref) {
6551 ret = -ENOMEM;
2cadf913 6552 break;
07906da7 6553 }
2cadf913 6554
7267fa68 6555 ref->ref = 1;
12883efb 6556 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 6557 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 6558 if (!ref->page) {
07906da7 6559 ret = -ENOMEM;
2cadf913
SR
6560 kfree(ref);
6561 break;
6562 }
6563
6564 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 6565 len, iter->cpu_file, 1);
2cadf913 6566 if (r < 0) {
7ea59064 6567 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
6568 kfree(ref);
6569 break;
6570 }
6571
6572 /*
6573 * zero out any left over data, this is going to
6574 * user land.
6575 */
6576 size = ring_buffer_page_len(ref->page);
6577 if (size < PAGE_SIZE)
6578 memset(ref->page + size, 0, PAGE_SIZE - size);
6579
6580 page = virt_to_page(ref->page);
6581
6582 spd.pages[i] = page;
6583 spd.partial[i].len = PAGE_SIZE;
6584 spd.partial[i].offset = 0;
6585 spd.partial[i].private = (unsigned long)ref;
6586 spd.nr_pages++;
93cfb3c9 6587 *ppos += PAGE_SIZE;
93459c6c 6588
12883efb 6589 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
6590 }
6591
cc60cdc9 6592 trace_access_unlock(iter->cpu_file);
2cadf913
SR
6593 spd.nr_pages = i;
6594
6595 /* did we read anything? */
6596 if (!spd.nr_pages) {
07906da7 6597 if (ret)
1ae2293d 6598 goto out;
d716ff71 6599
1ae2293d 6600 ret = -EAGAIN;
d716ff71 6601 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
1ae2293d 6602 goto out;
07906da7 6603
e30f53aa 6604 ret = wait_on_pipe(iter, true);
8b8b3683 6605 if (ret)
1ae2293d 6606 goto out;
e30f53aa 6607
cc60cdc9 6608 goto again;
2cadf913
SR
6609 }
6610
6611 ret = splice_to_pipe(pipe, &spd);
1ae2293d 6612out:
047fe360 6613 splice_shrink_spd(&spd);
6de58e62 6614
2cadf913
SR
6615 return ret;
6616}
6617
6618static const struct file_operations tracing_buffers_fops = {
6619 .open = tracing_buffers_open,
6620 .read = tracing_buffers_read,
cc60cdc9 6621 .poll = tracing_buffers_poll,
2cadf913
SR
6622 .release = tracing_buffers_release,
6623 .splice_read = tracing_buffers_splice_read,
6624 .llseek = no_llseek,
6625};
6626
c8d77183
SR
6627static ssize_t
6628tracing_stats_read(struct file *filp, char __user *ubuf,
6629 size_t count, loff_t *ppos)
6630{
4d3435b8
ON
6631 struct inode *inode = file_inode(filp);
6632 struct trace_array *tr = inode->i_private;
12883efb 6633 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 6634 int cpu = tracing_get_cpu(inode);
c8d77183
SR
6635 struct trace_seq *s;
6636 unsigned long cnt;
c64e148a
VN
6637 unsigned long long t;
6638 unsigned long usec_rem;
c8d77183 6639
e4f2d10f 6640 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 6641 if (!s)
a646365c 6642 return -ENOMEM;
c8d77183
SR
6643
6644 trace_seq_init(s);
6645
12883efb 6646 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6647 trace_seq_printf(s, "entries: %ld\n", cnt);
6648
12883efb 6649 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6650 trace_seq_printf(s, "overrun: %ld\n", cnt);
6651
12883efb 6652 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
6653 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
6654
12883efb 6655 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
6656 trace_seq_printf(s, "bytes: %ld\n", cnt);
6657
58e8eedf 6658 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 6659 /* local or global for trace_clock */
12883efb 6660 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
6661 usec_rem = do_div(t, USEC_PER_SEC);
6662 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
6663 t, usec_rem);
6664
12883efb 6665 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
6666 usec_rem = do_div(t, USEC_PER_SEC);
6667 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
6668 } else {
6669 /* counter or tsc mode for trace_clock */
6670 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 6671 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 6672
11043d8b 6673 trace_seq_printf(s, "now ts: %llu\n",
12883efb 6674 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 6675 }
c64e148a 6676
12883efb 6677 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
6678 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6679
12883efb 6680 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
6681 trace_seq_printf(s, "read events: %ld\n", cnt);
6682
5ac48378
SRRH
6683 count = simple_read_from_buffer(ubuf, count, ppos,
6684 s->buffer, trace_seq_used(s));
c8d77183
SR
6685
6686 kfree(s);
6687
6688 return count;
6689}
6690
6691static const struct file_operations tracing_stats_fops = {
4d3435b8 6692 .open = tracing_open_generic_tr,
c8d77183 6693 .read = tracing_stats_read,
b444786f 6694 .llseek = generic_file_llseek,
4d3435b8 6695 .release = tracing_release_generic_tr,
c8d77183
SR
6696};
6697
bc0c38d1
SR
6698#ifdef CONFIG_DYNAMIC_FTRACE
6699
b807c3d0
SR
6700int __weak ftrace_arch_read_dyn_info(char *buf, int size)
6701{
6702 return 0;
6703}
6704
bc0c38d1 6705static ssize_t
b807c3d0 6706tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
6707 size_t cnt, loff_t *ppos)
6708{
a26a2a27
SR
6709 static char ftrace_dyn_info_buffer[1024];
6710 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 6711 unsigned long *p = filp->private_data;
b807c3d0 6712 char *buf = ftrace_dyn_info_buffer;
a26a2a27 6713 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
6714 int r;
6715
b807c3d0
SR
6716 mutex_lock(&dyn_info_mutex);
6717 r = sprintf(buf, "%ld ", *p);
4bf39a94 6718
a26a2a27 6719 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
6720 buf[r++] = '\n';
6721
6722 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6723
6724 mutex_unlock(&dyn_info_mutex);
6725
6726 return r;
bc0c38d1
SR
6727}
6728
5e2336a0 6729static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 6730 .open = tracing_open_generic,
b807c3d0 6731 .read = tracing_read_dyn_info,
b444786f 6732 .llseek = generic_file_llseek,
bc0c38d1 6733};
77fd5c15 6734#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 6735
77fd5c15
SRRH
6736#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6737static void
bca6c8d0
SRV
6738ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
6739 struct ftrace_probe_ops *ops, void **data)
77fd5c15
SRRH
6740{
6741 tracing_snapshot();
6742}
bc0c38d1 6743
77fd5c15 6744static void
bca6c8d0
SRV
6745ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
6746 struct ftrace_probe_ops *ops, void **data)
bc0c38d1 6747{
1a93f8bd
SRV
6748 struct ftrace_func_mapper *mapper = ops->private_data;
6749 long *count = NULL;
77fd5c15 6750
1a93f8bd
SRV
6751 if (mapper)
6752 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6753
6754 if (count) {
6755
6756 if (*count <= 0)
6757 return;
bc0c38d1 6758
77fd5c15 6759 (*count)--;
1a93f8bd 6760 }
77fd5c15
SRRH
6761
6762 tracing_snapshot();
6763}
6764
6765static int
6766ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
6767 struct ftrace_probe_ops *ops, void *data)
6768{
1a93f8bd
SRV
6769 struct ftrace_func_mapper *mapper = ops->private_data;
6770 long *count = NULL;
77fd5c15
SRRH
6771
6772 seq_printf(m, "%ps:", (void *)ip);
6773
fa6f0cc7 6774 seq_puts(m, "snapshot");
77fd5c15 6775
1a93f8bd
SRV
6776 if (mapper)
6777 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
6778
6779 if (count)
6780 seq_printf(m, ":count=%ld\n", *count);
77fd5c15 6781 else
1a93f8bd 6782 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
6783
6784 return 0;
6785}
6786
1a93f8bd
SRV
6787static int
6788ftrace_snapshot_init(struct ftrace_probe_ops *ops, unsigned long ip,
1a48df00 6789 void *data)
1a93f8bd
SRV
6790{
6791 struct ftrace_func_mapper *mapper = ops->private_data;
6792
1a48df00 6793 return ftrace_func_mapper_add_ip(mapper, ip, data);
1a93f8bd
SRV
6794}
6795
6796static void
6797ftrace_snapshot_free(struct ftrace_probe_ops *ops, unsigned long ip,
6798 void **_data)
6799{
6800 struct ftrace_func_mapper *mapper = ops->private_data;
6801
6802 ftrace_func_mapper_remove_ip(mapper, ip);
6803}
6804
77fd5c15
SRRH
6805static struct ftrace_probe_ops snapshot_probe_ops = {
6806 .func = ftrace_snapshot,
6807 .print = ftrace_snapshot_print,
6808};
6809
6810static struct ftrace_probe_ops snapshot_count_probe_ops = {
6811 .func = ftrace_count_snapshot,
6812 .print = ftrace_snapshot_print,
1a93f8bd
SRV
6813 .init = ftrace_snapshot_init,
6814 .free = ftrace_snapshot_free,
77fd5c15
SRRH
6815};
6816
6817static int
6818ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
6819 char *glob, char *cmd, char *param, int enable)
6820{
6821 struct ftrace_probe_ops *ops;
6822 void *count = (void *)-1;
6823 char *number;
6824 int ret;
6825
6826 /* hash funcs only work with set_ftrace_filter */
6827 if (!enable)
6828 return -EINVAL;
6829
6830 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
6831
6832 if (glob[0] == '!') {
6833 unregister_ftrace_function_probe_func(glob+1, ops);
6834 return 0;
6835 }
6836
6837 if (!param)
6838 goto out_reg;
6839
6840 number = strsep(&param, ":");
6841
6842 if (!strlen(number))
6843 goto out_reg;
6844
1a93f8bd
SRV
6845 if (!ops->private_data) {
6846 ops->private_data = allocate_ftrace_func_mapper();
6847 if (!ops->private_data)
6848 return -ENOMEM;
6849 }
6850
77fd5c15
SRRH
6851 /*
6852 * We use the callback data field (which is a pointer)
6853 * as our counter.
6854 */
6855 ret = kstrtoul(number, 0, (unsigned long *)&count);
6856 if (ret)
6857 return ret;
6858
6859 out_reg:
6860 ret = register_ftrace_function_probe(glob, ops, count);
6861
6862 if (ret >= 0)
6863 alloc_snapshot(&global_trace);
6864
6865 return ret < 0 ? ret : 0;
6866}
6867
6868static struct ftrace_func_command ftrace_snapshot_cmd = {
6869 .name = "snapshot",
6870 .func = ftrace_trace_snapshot_callback,
6871};
6872
38de93ab 6873static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6874{
6875 return register_ftrace_command(&ftrace_snapshot_cmd);
6876}
6877#else
38de93ab 6878static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6879#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6880
7eeafbca 6881static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6882{
8434dc93
SRRH
6883 if (WARN_ON(!tr->dir))
6884 return ERR_PTR(-ENODEV);
6885
6886 /* Top directory uses NULL as the parent */
6887 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6888 return NULL;
6889
6890 /* All sub buffers have a descriptor */
2b6080f2 6891 return tr->dir;
bc0c38d1
SR
6892}
6893
2b6080f2 6894static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6895{
b04cc6b1
FW
6896 struct dentry *d_tracer;
6897
2b6080f2
SR
6898 if (tr->percpu_dir)
6899 return tr->percpu_dir;
b04cc6b1 6900
7eeafbca 6901 d_tracer = tracing_get_dentry(tr);
14a5ae40 6902 if (IS_ERR(d_tracer))
b04cc6b1
FW
6903 return NULL;
6904
8434dc93 6905 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6906
2b6080f2 6907 WARN_ONCE(!tr->percpu_dir,
8434dc93 6908 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6909
2b6080f2 6910 return tr->percpu_dir;
b04cc6b1
FW
6911}
6912
649e9c70
ON
6913static struct dentry *
6914trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6915 void *data, long cpu, const struct file_operations *fops)
6916{
6917 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6918
6919 if (ret) /* See tracing_get_cpu() */
7682c918 6920 d_inode(ret)->i_cdev = (void *)(cpu + 1);
649e9c70
ON
6921 return ret;
6922}
6923
2b6080f2 6924static void
8434dc93 6925tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6926{
2b6080f2 6927 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6928 struct dentry *d_cpu;
dd49a38c 6929 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6930
0a3d7ce7
NK
6931 if (!d_percpu)
6932 return;
6933
dd49a38c 6934 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6935 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6936 if (!d_cpu) {
a395d6a7 6937 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6938 return;
6939 }
b04cc6b1 6940
8656e7a2 6941 /* per cpu trace_pipe */
649e9c70 6942 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6943 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6944
6945 /* per cpu trace */
649e9c70 6946 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6947 tr, cpu, &tracing_fops);
7f96f93f 6948
649e9c70 6949 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6950 tr, cpu, &tracing_buffers_fops);
7f96f93f 6951
649e9c70 6952 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6953 tr, cpu, &tracing_stats_fops);
438ced17 6954
649e9c70 6955 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6956 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6957
6958#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6959 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6960 tr, cpu, &snapshot_fops);
6de58e62 6961
649e9c70 6962 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6963 tr, cpu, &snapshot_raw_fops);
f1affcaa 6964#endif
b04cc6b1
FW
6965}
6966
60a11774
SR
6967#ifdef CONFIG_FTRACE_SELFTEST
6968/* Let selftest have access to static functions in this file */
6969#include "trace_selftest.c"
6970#endif
6971
577b785f
SR
6972static ssize_t
6973trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6974 loff_t *ppos)
6975{
6976 struct trace_option_dentry *topt = filp->private_data;
6977 char *buf;
6978
6979 if (topt->flags->val & topt->opt->bit)
6980 buf = "1\n";
6981 else
6982 buf = "0\n";
6983
6984 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6985}
6986
6987static ssize_t
6988trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6989 loff_t *ppos)
6990{
6991 struct trace_option_dentry *topt = filp->private_data;
6992 unsigned long val;
577b785f
SR
6993 int ret;
6994
22fe9b54
PH
6995 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6996 if (ret)
577b785f
SR
6997 return ret;
6998
8d18eaaf
LZ
6999 if (val != 0 && val != 1)
7000 return -EINVAL;
577b785f 7001
8d18eaaf 7002 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 7003 mutex_lock(&trace_types_lock);
8c1a49ae 7004 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 7005 topt->opt, !val);
577b785f
SR
7006 mutex_unlock(&trace_types_lock);
7007 if (ret)
7008 return ret;
577b785f
SR
7009 }
7010
7011 *ppos += cnt;
7012
7013 return cnt;
7014}
7015
7016
7017static const struct file_operations trace_options_fops = {
7018 .open = tracing_open_generic,
7019 .read = trace_options_read,
7020 .write = trace_options_write,
b444786f 7021 .llseek = generic_file_llseek,
577b785f
SR
7022};
7023
9a38a885
SRRH
7024/*
7025 * In order to pass in both the trace_array descriptor as well as the index
7026 * to the flag that the trace option file represents, the trace_array
7027 * has a character array of trace_flags_index[], which holds the index
7028 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7029 * The address of this character array is passed to the flag option file
7030 * read/write callbacks.
7031 *
7032 * In order to extract both the index and the trace_array descriptor,
7033 * get_tr_index() uses the following algorithm.
7034 *
7035 * idx = *ptr;
7036 *
7037 * As the pointer itself contains the address of the index (remember
7038 * index[1] == 1).
7039 *
7040 * Then to get the trace_array descriptor, by subtracting that index
7041 * from the ptr, we get to the start of the index itself.
7042 *
7043 * ptr - idx == &index[0]
7044 *
7045 * Then a simple container_of() from that pointer gets us to the
7046 * trace_array descriptor.
7047 */
7048static void get_tr_index(void *data, struct trace_array **ptr,
7049 unsigned int *pindex)
7050{
7051 *pindex = *(unsigned char *)data;
7052
7053 *ptr = container_of(data - *pindex, struct trace_array,
7054 trace_flags_index);
7055}
7056
a8259075
SR
7057static ssize_t
7058trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7059 loff_t *ppos)
7060{
9a38a885
SRRH
7061 void *tr_index = filp->private_data;
7062 struct trace_array *tr;
7063 unsigned int index;
a8259075
SR
7064 char *buf;
7065
9a38a885
SRRH
7066 get_tr_index(tr_index, &tr, &index);
7067
7068 if (tr->trace_flags & (1 << index))
a8259075
SR
7069 buf = "1\n";
7070 else
7071 buf = "0\n";
7072
7073 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7074}
7075
7076static ssize_t
7077trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7078 loff_t *ppos)
7079{
9a38a885
SRRH
7080 void *tr_index = filp->private_data;
7081 struct trace_array *tr;
7082 unsigned int index;
a8259075
SR
7083 unsigned long val;
7084 int ret;
7085
9a38a885
SRRH
7086 get_tr_index(tr_index, &tr, &index);
7087
22fe9b54
PH
7088 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7089 if (ret)
a8259075
SR
7090 return ret;
7091
f2d84b65 7092 if (val != 0 && val != 1)
a8259075 7093 return -EINVAL;
69d34da2
SRRH
7094
7095 mutex_lock(&trace_types_lock);
2b6080f2 7096 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 7097 mutex_unlock(&trace_types_lock);
a8259075 7098
613f04a0
SRRH
7099 if (ret < 0)
7100 return ret;
7101
a8259075
SR
7102 *ppos += cnt;
7103
7104 return cnt;
7105}
7106
a8259075
SR
7107static const struct file_operations trace_options_core_fops = {
7108 .open = tracing_open_generic,
7109 .read = trace_options_core_read,
7110 .write = trace_options_core_write,
b444786f 7111 .llseek = generic_file_llseek,
a8259075
SR
7112};
7113
5452af66 7114struct dentry *trace_create_file(const char *name,
f4ae40a6 7115 umode_t mode,
5452af66
FW
7116 struct dentry *parent,
7117 void *data,
7118 const struct file_operations *fops)
7119{
7120 struct dentry *ret;
7121
8434dc93 7122 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 7123 if (!ret)
a395d6a7 7124 pr_warn("Could not create tracefs '%s' entry\n", name);
5452af66
FW
7125
7126 return ret;
7127}
7128
7129
2b6080f2 7130static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
7131{
7132 struct dentry *d_tracer;
a8259075 7133
2b6080f2
SR
7134 if (tr->options)
7135 return tr->options;
a8259075 7136
7eeafbca 7137 d_tracer = tracing_get_dentry(tr);
14a5ae40 7138 if (IS_ERR(d_tracer))
a8259075
SR
7139 return NULL;
7140
8434dc93 7141 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 7142 if (!tr->options) {
a395d6a7 7143 pr_warn("Could not create tracefs directory 'options'\n");
a8259075
SR
7144 return NULL;
7145 }
7146
2b6080f2 7147 return tr->options;
a8259075
SR
7148}
7149
577b785f 7150static void
2b6080f2
SR
7151create_trace_option_file(struct trace_array *tr,
7152 struct trace_option_dentry *topt,
577b785f
SR
7153 struct tracer_flags *flags,
7154 struct tracer_opt *opt)
7155{
7156 struct dentry *t_options;
577b785f 7157
2b6080f2 7158 t_options = trace_options_init_dentry(tr);
577b785f
SR
7159 if (!t_options)
7160 return;
7161
7162 topt->flags = flags;
7163 topt->opt = opt;
2b6080f2 7164 topt->tr = tr;
577b785f 7165
5452af66 7166 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
7167 &trace_options_fops);
7168
577b785f
SR
7169}
7170
37aea98b 7171static void
2b6080f2 7172create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
7173{
7174 struct trace_option_dentry *topts;
37aea98b 7175 struct trace_options *tr_topts;
577b785f
SR
7176 struct tracer_flags *flags;
7177 struct tracer_opt *opts;
7178 int cnt;
37aea98b 7179 int i;
577b785f
SR
7180
7181 if (!tracer)
37aea98b 7182 return;
577b785f
SR
7183
7184 flags = tracer->flags;
7185
7186 if (!flags || !flags->opts)
37aea98b
SRRH
7187 return;
7188
7189 /*
7190 * If this is an instance, only create flags for tracers
7191 * the instance may have.
7192 */
7193 if (!trace_ok_for_array(tracer, tr))
7194 return;
7195
7196 for (i = 0; i < tr->nr_topts; i++) {
d39cdd20
CH
7197 /* Make sure there's no duplicate flags. */
7198 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
37aea98b
SRRH
7199 return;
7200 }
577b785f
SR
7201
7202 opts = flags->opts;
7203
7204 for (cnt = 0; opts[cnt].name; cnt++)
7205 ;
7206
0cfe8245 7207 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f 7208 if (!topts)
37aea98b
SRRH
7209 return;
7210
7211 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7212 GFP_KERNEL);
7213 if (!tr_topts) {
7214 kfree(topts);
7215 return;
7216 }
7217
7218 tr->topts = tr_topts;
7219 tr->topts[tr->nr_topts].tracer = tracer;
7220 tr->topts[tr->nr_topts].topts = topts;
7221 tr->nr_topts++;
577b785f 7222
41d9c0be 7223 for (cnt = 0; opts[cnt].name; cnt++) {
2b6080f2 7224 create_trace_option_file(tr, &topts[cnt], flags,
577b785f 7225 &opts[cnt]);
41d9c0be
SRRH
7226 WARN_ONCE(topts[cnt].entry == NULL,
7227 "Failed to create trace option: %s",
7228 opts[cnt].name);
7229 }
577b785f
SR
7230}
7231
a8259075 7232static struct dentry *
2b6080f2
SR
7233create_trace_option_core_file(struct trace_array *tr,
7234 const char *option, long index)
a8259075
SR
7235{
7236 struct dentry *t_options;
a8259075 7237
2b6080f2 7238 t_options = trace_options_init_dentry(tr);
a8259075
SR
7239 if (!t_options)
7240 return NULL;
7241
9a38a885
SRRH
7242 return trace_create_file(option, 0644, t_options,
7243 (void *)&tr->trace_flags_index[index],
7244 &trace_options_core_fops);
a8259075
SR
7245}
7246
16270145 7247static void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
7248{
7249 struct dentry *t_options;
16270145 7250 bool top_level = tr == &global_trace;
a8259075
SR
7251 int i;
7252
2b6080f2 7253 t_options = trace_options_init_dentry(tr);
a8259075
SR
7254 if (!t_options)
7255 return;
7256
16270145
SRRH
7257 for (i = 0; trace_options[i]; i++) {
7258 if (top_level ||
7259 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7260 create_trace_option_core_file(tr, trace_options[i], i);
7261 }
a8259075
SR
7262}
7263
499e5470
SR
7264static ssize_t
7265rb_simple_read(struct file *filp, char __user *ubuf,
7266 size_t cnt, loff_t *ppos)
7267{
348f0fc2 7268 struct trace_array *tr = filp->private_data;
499e5470
SR
7269 char buf[64];
7270 int r;
7271
10246fa3 7272 r = tracer_tracing_is_on(tr);
499e5470
SR
7273 r = sprintf(buf, "%d\n", r);
7274
7275 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7276}
7277
7278static ssize_t
7279rb_simple_write(struct file *filp, const char __user *ubuf,
7280 size_t cnt, loff_t *ppos)
7281{
348f0fc2 7282 struct trace_array *tr = filp->private_data;
12883efb 7283 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
7284 unsigned long val;
7285 int ret;
7286
7287 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7288 if (ret)
7289 return ret;
7290
7291 if (buffer) {
2df8f8a6
SR
7292 mutex_lock(&trace_types_lock);
7293 if (val) {
10246fa3 7294 tracer_tracing_on(tr);
2b6080f2
SR
7295 if (tr->current_trace->start)
7296 tr->current_trace->start(tr);
2df8f8a6 7297 } else {
10246fa3 7298 tracer_tracing_off(tr);
2b6080f2
SR
7299 if (tr->current_trace->stop)
7300 tr->current_trace->stop(tr);
2df8f8a6
SR
7301 }
7302 mutex_unlock(&trace_types_lock);
499e5470
SR
7303 }
7304
7305 (*ppos)++;
7306
7307 return cnt;
7308}
7309
7310static const struct file_operations rb_simple_fops = {
7b85af63 7311 .open = tracing_open_generic_tr,
499e5470
SR
7312 .read = rb_simple_read,
7313 .write = rb_simple_write,
7b85af63 7314 .release = tracing_release_generic_tr,
499e5470
SR
7315 .llseek = default_llseek,
7316};
7317
277ba044
SR
7318struct dentry *trace_instance_dir;
7319
7320static void
8434dc93 7321init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 7322
55034cd6
SRRH
7323static int
7324allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
7325{
7326 enum ring_buffer_flags rb_flags;
737223fb 7327
983f938a 7328 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
737223fb 7329
dced341b
SRRH
7330 buf->tr = tr;
7331
55034cd6
SRRH
7332 buf->buffer = ring_buffer_alloc(size, rb_flags);
7333 if (!buf->buffer)
7334 return -ENOMEM;
737223fb 7335
55034cd6
SRRH
7336 buf->data = alloc_percpu(struct trace_array_cpu);
7337 if (!buf->data) {
7338 ring_buffer_free(buf->buffer);
7339 return -ENOMEM;
7340 }
737223fb 7341
737223fb
SRRH
7342 /* Allocate the first page for all buffers */
7343 set_buffer_entries(&tr->trace_buffer,
7344 ring_buffer_size(tr->trace_buffer.buffer, 0));
7345
55034cd6
SRRH
7346 return 0;
7347}
737223fb 7348
55034cd6
SRRH
7349static int allocate_trace_buffers(struct trace_array *tr, int size)
7350{
7351 int ret;
737223fb 7352
55034cd6
SRRH
7353 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7354 if (ret)
7355 return ret;
737223fb 7356
55034cd6
SRRH
7357#ifdef CONFIG_TRACER_MAX_TRACE
7358 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7359 allocate_snapshot ? size : 1);
7360 if (WARN_ON(ret)) {
737223fb 7361 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
7362 free_percpu(tr->trace_buffer.data);
7363 return -ENOMEM;
7364 }
7365 tr->allocated_snapshot = allocate_snapshot;
737223fb 7366
55034cd6
SRRH
7367 /*
7368 * Only the top level trace array gets its snapshot allocated
7369 * from the kernel command line.
7370 */
7371 allocate_snapshot = false;
737223fb 7372#endif
55034cd6 7373 return 0;
737223fb
SRRH
7374}
7375
f0b70cc4
SRRH
7376static void free_trace_buffer(struct trace_buffer *buf)
7377{
7378 if (buf->buffer) {
7379 ring_buffer_free(buf->buffer);
7380 buf->buffer = NULL;
7381 free_percpu(buf->data);
7382 buf->data = NULL;
7383 }
7384}
7385
23aaa3c1
SRRH
7386static void free_trace_buffers(struct trace_array *tr)
7387{
7388 if (!tr)
7389 return;
7390
f0b70cc4 7391 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
7392
7393#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 7394 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
7395#endif
7396}
7397
9a38a885
SRRH
7398static void init_trace_flags_index(struct trace_array *tr)
7399{
7400 int i;
7401
7402 /* Used by the trace options files */
7403 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7404 tr->trace_flags_index[i] = i;
7405}
7406
37aea98b
SRRH
7407static void __update_tracer_options(struct trace_array *tr)
7408{
7409 struct tracer *t;
7410
7411 for (t = trace_types; t; t = t->next)
7412 add_tracer_options(tr, t);
7413}
7414
7415static void update_tracer_options(struct trace_array *tr)
7416{
7417 mutex_lock(&trace_types_lock);
7418 __update_tracer_options(tr);
7419 mutex_unlock(&trace_types_lock);
7420}
7421
eae47358 7422static int instance_mkdir(const char *name)
737223fb 7423{
277ba044
SR
7424 struct trace_array *tr;
7425 int ret;
277ba044
SR
7426
7427 mutex_lock(&trace_types_lock);
7428
7429 ret = -EEXIST;
7430 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7431 if (tr->name && strcmp(tr->name, name) == 0)
7432 goto out_unlock;
7433 }
7434
7435 ret = -ENOMEM;
7436 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7437 if (!tr)
7438 goto out_unlock;
7439
7440 tr->name = kstrdup(name, GFP_KERNEL);
7441 if (!tr->name)
7442 goto out_free_tr;
7443
ccfe9e42
AL
7444 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7445 goto out_free_tr;
7446
20550622 7447 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
983f938a 7448
ccfe9e42
AL
7449 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7450
277ba044
SR
7451 raw_spin_lock_init(&tr->start_lock);
7452
0b9b12c1
SRRH
7453 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7454
277ba044
SR
7455 tr->current_trace = &nop_trace;
7456
7457 INIT_LIST_HEAD(&tr->systems);
7458 INIT_LIST_HEAD(&tr->events);
7459
737223fb 7460 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
7461 goto out_free_tr;
7462
8434dc93 7463 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
7464 if (!tr->dir)
7465 goto out_free_tr;
7466
7467 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 7468 if (ret) {
8434dc93 7469 tracefs_remove_recursive(tr->dir);
277ba044 7470 goto out_free_tr;
609e85a7 7471 }
277ba044 7472
8434dc93 7473 init_tracer_tracefs(tr, tr->dir);
9a38a885 7474 init_trace_flags_index(tr);
37aea98b 7475 __update_tracer_options(tr);
277ba044
SR
7476
7477 list_add(&tr->list, &ftrace_trace_arrays);
7478
7479 mutex_unlock(&trace_types_lock);
7480
7481 return 0;
7482
7483 out_free_tr:
23aaa3c1 7484 free_trace_buffers(tr);
ccfe9e42 7485 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
7486 kfree(tr->name);
7487 kfree(tr);
7488
7489 out_unlock:
7490 mutex_unlock(&trace_types_lock);
7491
7492 return ret;
7493
7494}
7495
eae47358 7496static int instance_rmdir(const char *name)
0c8916c3
SR
7497{
7498 struct trace_array *tr;
7499 int found = 0;
7500 int ret;
37aea98b 7501 int i;
0c8916c3
SR
7502
7503 mutex_lock(&trace_types_lock);
7504
7505 ret = -ENODEV;
7506 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7507 if (tr->name && strcmp(tr->name, name) == 0) {
7508 found = 1;
7509 break;
7510 }
7511 }
7512 if (!found)
7513 goto out_unlock;
7514
a695cb58 7515 ret = -EBUSY;
cf6ab6d9 7516 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
7517 goto out_unlock;
7518
0c8916c3
SR
7519 list_del(&tr->list);
7520
20550622
SRRH
7521 /* Disable all the flags that were enabled coming in */
7522 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7523 if ((1 << i) & ZEROED_TRACE_FLAGS)
7524 set_tracer_flag(tr, 1 << i, 0);
7525 }
7526
6b450d25 7527 tracing_set_nop(tr);
0c8916c3 7528 event_trace_del_tracer(tr);
591dffda 7529 ftrace_destroy_function_files(tr);
681a4a2f 7530 tracefs_remove_recursive(tr->dir);
a9fcaaac 7531 free_trace_buffers(tr);
0c8916c3 7532
37aea98b
SRRH
7533 for (i = 0; i < tr->nr_topts; i++) {
7534 kfree(tr->topts[i].topts);
7535 }
7536 kfree(tr->topts);
7537
0c8916c3
SR
7538 kfree(tr->name);
7539 kfree(tr);
7540
7541 ret = 0;
7542
7543 out_unlock:
7544 mutex_unlock(&trace_types_lock);
7545
7546 return ret;
7547}
7548
277ba044
SR
7549static __init void create_trace_instances(struct dentry *d_tracer)
7550{
eae47358
SRRH
7551 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7552 instance_mkdir,
7553 instance_rmdir);
277ba044
SR
7554 if (WARN_ON(!trace_instance_dir))
7555 return;
277ba044
SR
7556}
7557
2b6080f2 7558static void
8434dc93 7559init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 7560{
121aaee7 7561 int cpu;
2b6080f2 7562
607e2ea1
SRRH
7563 trace_create_file("available_tracers", 0444, d_tracer,
7564 tr, &show_traces_fops);
7565
7566 trace_create_file("current_tracer", 0644, d_tracer,
7567 tr, &set_tracer_fops);
7568
ccfe9e42
AL
7569 trace_create_file("tracing_cpumask", 0644, d_tracer,
7570 tr, &tracing_cpumask_fops);
7571
2b6080f2
SR
7572 trace_create_file("trace_options", 0644, d_tracer,
7573 tr, &tracing_iter_fops);
7574
7575 trace_create_file("trace", 0644, d_tracer,
6484c71c 7576 tr, &tracing_fops);
2b6080f2
SR
7577
7578 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 7579 tr, &tracing_pipe_fops);
2b6080f2
SR
7580
7581 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 7582 tr, &tracing_entries_fops);
2b6080f2
SR
7583
7584 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7585 tr, &tracing_total_entries_fops);
7586
238ae93d 7587 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
7588 tr, &tracing_free_buffer_fops);
7589
7590 trace_create_file("trace_marker", 0220, d_tracer,
7591 tr, &tracing_mark_fops);
7592
fa32e855
SR
7593 trace_create_file("trace_marker_raw", 0220, d_tracer,
7594 tr, &tracing_mark_raw_fops);
7595
2b6080f2
SR
7596 trace_create_file("trace_clock", 0644, d_tracer, tr,
7597 &trace_clock_fops);
7598
7599 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 7600 tr, &rb_simple_fops);
ce9bae55 7601
16270145
SRRH
7602 create_trace_options_dir(tr);
7603
f971cc9a 7604#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6d9b3fa5
SRRH
7605 trace_create_file("tracing_max_latency", 0644, d_tracer,
7606 &tr->max_latency, &tracing_max_lat_fops);
7607#endif
7608
591dffda
SRRH
7609 if (ftrace_create_function_files(tr, d_tracer))
7610 WARN(1, "Could not allocate function filter files");
7611
ce9bae55
SRRH
7612#ifdef CONFIG_TRACER_SNAPSHOT
7613 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 7614 tr, &snapshot_fops);
ce9bae55 7615#endif
121aaee7
SRRH
7616
7617 for_each_tracing_cpu(cpu)
8434dc93 7618 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 7619
345ddcc8 7620 ftrace_init_tracefs(tr, d_tracer);
2b6080f2
SR
7621}
7622
93faccbb 7623static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
f76180bc
SRRH
7624{
7625 struct vfsmount *mnt;
7626 struct file_system_type *type;
7627
7628 /*
7629 * To maintain backward compatibility for tools that mount
7630 * debugfs to get to the tracing facility, tracefs is automatically
7631 * mounted to the debugfs/tracing directory.
7632 */
7633 type = get_fs_type("tracefs");
7634 if (!type)
7635 return NULL;
93faccbb 7636 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
f76180bc
SRRH
7637 put_filesystem(type);
7638 if (IS_ERR(mnt))
7639 return NULL;
7640 mntget(mnt);
7641
7642 return mnt;
7643}
7644
7eeafbca
SRRH
7645/**
7646 * tracing_init_dentry - initialize top level trace array
7647 *
7648 * This is called when creating files or directories in the tracing
7649 * directory. It is called via fs_initcall() by any of the boot up code
7650 * and expects to return the dentry of the top level tracing directory.
7651 */
7652struct dentry *tracing_init_dentry(void)
7653{
7654 struct trace_array *tr = &global_trace;
7655
f76180bc 7656 /* The top level trace array uses NULL as parent */
7eeafbca 7657 if (tr->dir)
f76180bc 7658 return NULL;
7eeafbca 7659
8b129199
JW
7660 if (WARN_ON(!tracefs_initialized()) ||
7661 (IS_ENABLED(CONFIG_DEBUG_FS) &&
7662 WARN_ON(!debugfs_initialized())))
7eeafbca
SRRH
7663 return ERR_PTR(-ENODEV);
7664
f76180bc
SRRH
7665 /*
7666 * As there may still be users that expect the tracing
7667 * files to exist in debugfs/tracing, we must automount
7668 * the tracefs file system there, so older tools still
7669 * work with the newer kerenl.
7670 */
7671 tr->dir = debugfs_create_automount("tracing", NULL,
7672 trace_automount, NULL);
7eeafbca
SRRH
7673 if (!tr->dir) {
7674 pr_warn_once("Could not create debugfs directory 'tracing'\n");
7675 return ERR_PTR(-ENOMEM);
7676 }
7677
8434dc93 7678 return NULL;
7eeafbca
SRRH
7679}
7680
0c564a53
SRRH
7681extern struct trace_enum_map *__start_ftrace_enum_maps[];
7682extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7683
7684static void __init trace_enum_init(void)
7685{
3673b8e4
SRRH
7686 int len;
7687
7688 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 7689 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
7690}
7691
7692#ifdef CONFIG_MODULES
7693static void trace_module_add_enums(struct module *mod)
7694{
7695 if (!mod->num_trace_enums)
7696 return;
7697
7698 /*
7699 * Modules with bad taint do not have events created, do
7700 * not bother with enums either.
7701 */
7702 if (trace_module_has_bad_taint(mod))
7703 return;
7704
9828413d 7705 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
7706}
7707
9828413d
SRRH
7708#ifdef CONFIG_TRACE_ENUM_MAP_FILE
7709static void trace_module_remove_enums(struct module *mod)
7710{
7711 union trace_enum_map_item *map;
7712 union trace_enum_map_item **last = &trace_enum_maps;
7713
7714 if (!mod->num_trace_enums)
7715 return;
7716
7717 mutex_lock(&trace_enum_mutex);
7718
7719 map = trace_enum_maps;
7720
7721 while (map) {
7722 if (map->head.mod == mod)
7723 break;
7724 map = trace_enum_jmp_to_tail(map);
7725 last = &map->tail.next;
7726 map = map->tail.next;
7727 }
7728 if (!map)
7729 goto out;
7730
7731 *last = trace_enum_jmp_to_tail(map)->tail.next;
7732 kfree(map);
7733 out:
7734 mutex_unlock(&trace_enum_mutex);
7735}
7736#else
7737static inline void trace_module_remove_enums(struct module *mod) { }
7738#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7739
3673b8e4
SRRH
7740static int trace_module_notify(struct notifier_block *self,
7741 unsigned long val, void *data)
7742{
7743 struct module *mod = data;
7744
7745 switch (val) {
7746 case MODULE_STATE_COMING:
7747 trace_module_add_enums(mod);
7748 break;
9828413d
SRRH
7749 case MODULE_STATE_GOING:
7750 trace_module_remove_enums(mod);
7751 break;
3673b8e4
SRRH
7752 }
7753
7754 return 0;
0c564a53
SRRH
7755}
7756
3673b8e4
SRRH
7757static struct notifier_block trace_module_nb = {
7758 .notifier_call = trace_module_notify,
7759 .priority = 0,
7760};
9828413d 7761#endif /* CONFIG_MODULES */
3673b8e4 7762
8434dc93 7763static __init int tracer_init_tracefs(void)
bc0c38d1
SR
7764{
7765 struct dentry *d_tracer;
bc0c38d1 7766
7e53bd42
LJ
7767 trace_access_lock_init();
7768
bc0c38d1 7769 d_tracer = tracing_init_dentry();
14a5ae40 7770 if (IS_ERR(d_tracer))
ed6f1c99 7771 return 0;
bc0c38d1 7772
8434dc93 7773 init_tracer_tracefs(&global_trace, d_tracer);
501c2375 7774 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
bc0c38d1 7775
5452af66 7776 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 7777 &global_trace, &tracing_thresh_fops);
a8259075 7778
339ae5d3 7779 trace_create_file("README", 0444, d_tracer,
5452af66
FW
7780 NULL, &tracing_readme_fops);
7781
69abe6a5
AP
7782 trace_create_file("saved_cmdlines", 0444, d_tracer,
7783 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 7784
939c7a4f
YY
7785 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
7786 NULL, &tracing_saved_cmdlines_size_fops);
7787
0c564a53
SRRH
7788 trace_enum_init();
7789
9828413d
SRRH
7790 trace_create_enum_file(d_tracer);
7791
3673b8e4
SRRH
7792#ifdef CONFIG_MODULES
7793 register_module_notifier(&trace_module_nb);
7794#endif
7795
bc0c38d1 7796#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
7797 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
7798 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 7799#endif
b04cc6b1 7800
277ba044 7801 create_trace_instances(d_tracer);
5452af66 7802
37aea98b 7803 update_tracer_options(&global_trace);
09d23a1d 7804
b5ad384e 7805 return 0;
bc0c38d1
SR
7806}
7807
3f5a54e3
SR
7808static int trace_panic_handler(struct notifier_block *this,
7809 unsigned long event, void *unused)
7810{
944ac425 7811 if (ftrace_dump_on_oops)
cecbca96 7812 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7813 return NOTIFY_OK;
7814}
7815
7816static struct notifier_block trace_panic_notifier = {
7817 .notifier_call = trace_panic_handler,
7818 .next = NULL,
7819 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7820};
7821
7822static int trace_die_handler(struct notifier_block *self,
7823 unsigned long val,
7824 void *data)
7825{
7826 switch (val) {
7827 case DIE_OOPS:
944ac425 7828 if (ftrace_dump_on_oops)
cecbca96 7829 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
7830 break;
7831 default:
7832 break;
7833 }
7834 return NOTIFY_OK;
7835}
7836
7837static struct notifier_block trace_die_notifier = {
7838 .notifier_call = trace_die_handler,
7839 .priority = 200
7840};
7841
7842/*
7843 * printk is set to max of 1024, we really don't need it that big.
7844 * Nothing should be printing 1000 characters anyway.
7845 */
7846#define TRACE_MAX_PRINT 1000
7847
7848/*
7849 * Define here KERN_TRACE so that we have one place to modify
7850 * it if we decide to change what log level the ftrace dump
7851 * should be at.
7852 */
428aee14 7853#define KERN_TRACE KERN_EMERG
3f5a54e3 7854
955b61e5 7855void
3f5a54e3
SR
7856trace_printk_seq(struct trace_seq *s)
7857{
7858 /* Probably should print a warning here. */
3a161d99
SRRH
7859 if (s->seq.len >= TRACE_MAX_PRINT)
7860 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 7861
820b75f6
SRRH
7862 /*
7863 * More paranoid code. Although the buffer size is set to
7864 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
7865 * an extra layer of protection.
7866 */
7867 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
7868 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
7869
7870 /* should be zero ended, but we are paranoid. */
3a161d99 7871 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
7872
7873 printk(KERN_TRACE "%s", s->buffer);
7874
f9520750 7875 trace_seq_init(s);
3f5a54e3
SR
7876}
7877
955b61e5
JW
7878void trace_init_global_iter(struct trace_iterator *iter)
7879{
7880 iter->tr = &global_trace;
2b6080f2 7881 iter->trace = iter->tr->current_trace;
ae3b5093 7882 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 7883 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
7884
7885 if (iter->trace && iter->trace->open)
7886 iter->trace->open(iter);
7887
7888 /* Annotate start of buffers if we had overruns */
7889 if (ring_buffer_overruns(iter->trace_buffer->buffer))
7890 iter->iter_flags |= TRACE_FILE_ANNOTATE;
7891
7892 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
7893 if (trace_clocks[iter->tr->clock_id].in_ns)
7894 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
7895}
7896
7fe70b57 7897void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 7898{
3f5a54e3
SR
7899 /* use static because iter can be a bit big for the stack */
7900 static struct trace_iterator iter;
7fe70b57 7901 static atomic_t dump_running;
983f938a 7902 struct trace_array *tr = &global_trace;
cf586b61 7903 unsigned int old_userobj;
d769041f
SR
7904 unsigned long flags;
7905 int cnt = 0, cpu;
3f5a54e3 7906
7fe70b57
SRRH
7907 /* Only allow one dump user at a time. */
7908 if (atomic_inc_return(&dump_running) != 1) {
7909 atomic_dec(&dump_running);
7910 return;
7911 }
3f5a54e3 7912
7fe70b57
SRRH
7913 /*
7914 * Always turn off tracing when we dump.
7915 * We don't need to show trace output of what happens
7916 * between multiple crashes.
7917 *
7918 * If the user does a sysrq-z, then they can re-enable
7919 * tracing with echo 1 > tracing_on.
7920 */
0ee6b6cf 7921 tracing_off();
cf586b61 7922
7fe70b57 7923 local_irq_save(flags);
3f5a54e3 7924
38dbe0b1 7925 /* Simulate the iterator */
955b61e5
JW
7926 trace_init_global_iter(&iter);
7927
d769041f 7928 for_each_tracing_cpu(cpu) {
5e2d5ef8 7929 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
d769041f
SR
7930 }
7931
983f938a 7932 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
cf586b61 7933
b54d3de9 7934 /* don't look at user memory in panic mode */
983f938a 7935 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
b54d3de9 7936
cecbca96
FW
7937 switch (oops_dump_mode) {
7938 case DUMP_ALL:
ae3b5093 7939 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7940 break;
7941 case DUMP_ORIG:
7942 iter.cpu_file = raw_smp_processor_id();
7943 break;
7944 case DUMP_NONE:
7945 goto out_enable;
7946 default:
7947 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7948 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7949 }
7950
7951 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7952
7fe70b57
SRRH
7953 /* Did function tracer already get disabled? */
7954 if (ftrace_is_dead()) {
7955 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7956 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7957 }
7958
3f5a54e3
SR
7959 /*
7960 * We need to stop all tracing on all CPUS to read the
7961 * the next buffer. This is a bit expensive, but is
7962 * not done often. We fill all what we can read,
7963 * and then release the locks again.
7964 */
7965
3f5a54e3
SR
7966 while (!trace_empty(&iter)) {
7967
7968 if (!cnt)
7969 printk(KERN_TRACE "---------------------------------\n");
7970
7971 cnt++;
7972
7973 /* reset all but tr, trace, and overruns */
7974 memset(&iter.seq, 0,
7975 sizeof(struct trace_iterator) -
7976 offsetof(struct trace_iterator, seq));
7977 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7978 iter.pos = -1;
7979
955b61e5 7980 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7981 int ret;
7982
7983 ret = print_trace_line(&iter);
7984 if (ret != TRACE_TYPE_NO_CONSUME)
7985 trace_consume(&iter);
3f5a54e3 7986 }
b892e5c8 7987 touch_nmi_watchdog();
3f5a54e3
SR
7988
7989 trace_printk_seq(&iter.seq);
7990 }
7991
7992 if (!cnt)
7993 printk(KERN_TRACE " (ftrace buffer empty)\n");
7994 else
7995 printk(KERN_TRACE "---------------------------------\n");
7996
cecbca96 7997 out_enable:
983f938a 7998 tr->trace_flags |= old_userobj;
cf586b61 7999
7fe70b57
SRRH
8000 for_each_tracing_cpu(cpu) {
8001 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 8002 }
7fe70b57 8003 atomic_dec(&dump_running);
cd891ae0 8004 local_irq_restore(flags);
3f5a54e3 8005}
a8eecf22 8006EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 8007
3928a8a2 8008__init static int tracer_alloc_buffers(void)
bc0c38d1 8009{
73c5162a 8010 int ring_buf_size;
9e01c1b7 8011 int ret = -ENOMEM;
4c11d7ae 8012
b5e87c05
SRRH
8013 /*
8014 * Make sure we don't accidently add more trace options
8015 * than we have bits for.
8016 */
9a38a885 8017 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
b5e87c05 8018
9e01c1b7
RR
8019 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8020 goto out;
8021
ccfe9e42 8022 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 8023 goto out_free_buffer_mask;
4c11d7ae 8024
07d777fe
SR
8025 /* Only allocate trace_printk buffers if a trace_printk exists */
8026 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 8027 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
8028 trace_printk_init_buffers();
8029
73c5162a
SR
8030 /* To save memory, keep the ring buffer size to its minimum */
8031 if (ring_buffer_expanded)
8032 ring_buf_size = trace_buf_size;
8033 else
8034 ring_buf_size = 1;
8035
9e01c1b7 8036 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 8037 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 8038
2b6080f2
SR
8039 raw_spin_lock_init(&global_trace.start_lock);
8040
b32614c0
SAS
8041 /*
8042 * The prepare callbacks allocates some memory for the ring buffer. We
8043 * don't free the buffer if the if the CPU goes down. If we were to free
8044 * the buffer, then the user would lose any trace that was in the
8045 * buffer. The memory will be removed once the "instance" is removed.
8046 */
8047 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8048 "trace/RB:preapre", trace_rb_cpu_prepare,
8049 NULL);
8050 if (ret < 0)
8051 goto out_free_cpumask;
2c4a33ab
SRRH
8052 /* Used for event triggers */
8053 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8054 if (!temp_buffer)
b32614c0 8055 goto out_rm_hp_state;
2c4a33ab 8056
939c7a4f
YY
8057 if (trace_create_savedcmd() < 0)
8058 goto out_free_temp_buffer;
8059
9e01c1b7 8060 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 8061 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
8062 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8063 WARN_ON(1);
939c7a4f 8064 goto out_free_savedcmd;
4c11d7ae 8065 }
a7603ff4 8066
499e5470
SR
8067 if (global_trace.buffer_disabled)
8068 tracing_off();
4c11d7ae 8069
e1e232ca
SR
8070 if (trace_boot_clock) {
8071 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8072 if (ret < 0)
a395d6a7
JP
8073 pr_warn("Trace clock %s not defined, going back to default\n",
8074 trace_boot_clock);
e1e232ca
SR
8075 }
8076
ca164318
SRRH
8077 /*
8078 * register_tracer() might reference current_trace, so it
8079 * needs to be set before we register anything. This is
8080 * just a bootstrap of current_trace anyway.
8081 */
2b6080f2
SR
8082 global_trace.current_trace = &nop_trace;
8083
0b9b12c1
SRRH
8084 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8085
4104d326
SRRH
8086 ftrace_init_global_array_ops(&global_trace);
8087
9a38a885
SRRH
8088 init_trace_flags_index(&global_trace);
8089
ca164318
SRRH
8090 register_tracer(&nop_trace);
8091
dbeafd0d
SRV
8092 /* Function tracing may start here (via kernel command line) */
8093 init_function_trace();
8094
60a11774
SR
8095 /* All seems OK, enable tracing */
8096 tracing_disabled = 0;
3928a8a2 8097
3f5a54e3
SR
8098 atomic_notifier_chain_register(&panic_notifier_list,
8099 &trace_panic_notifier);
8100
8101 register_die_notifier(&trace_die_notifier);
2fc1dfbe 8102
ae63b31e
SR
8103 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8104
8105 INIT_LIST_HEAD(&global_trace.systems);
8106 INIT_LIST_HEAD(&global_trace.events);
8107 list_add(&global_trace.list, &ftrace_trace_arrays);
8108
a4d1e688 8109 apply_trace_boot_options();
7bcfaf54 8110
77fd5c15
SRRH
8111 register_snapshot_cmd();
8112
2fc1dfbe 8113 return 0;
3f5a54e3 8114
939c7a4f
YY
8115out_free_savedcmd:
8116 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
8117out_free_temp_buffer:
8118 ring_buffer_free(temp_buffer);
b32614c0
SAS
8119out_rm_hp_state:
8120 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
9e01c1b7 8121out_free_cpumask:
ccfe9e42 8122 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
8123out_free_buffer_mask:
8124 free_cpumask_var(tracing_buffer_mask);
8125out:
8126 return ret;
bc0c38d1 8127}
b2821ae6 8128
e725c731 8129void __init early_trace_init(void)
5f893b26 8130{
0daa2302
SRRH
8131 if (tracepoint_printk) {
8132 tracepoint_print_iter =
8133 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8134 if (WARN_ON(!tracepoint_print_iter))
8135 tracepoint_printk = 0;
42391745
SRRH
8136 else
8137 static_key_enable(&tracepoint_printk_key.key);
0daa2302 8138 }
5f893b26 8139 tracer_alloc_buffers();
e725c731
SRV
8140}
8141
8142void __init trace_init(void)
8143{
0c564a53 8144 trace_event_init();
5f893b26
SRRH
8145}
8146
b2821ae6
SR
8147__init static int clear_boot_tracer(void)
8148{
8149 /*
8150 * The default tracer at boot buffer is an init section.
8151 * This function is called in lateinit. If we did not
8152 * find the boot tracer, then clear it out, to prevent
8153 * later registration from accessing the buffer that is
8154 * about to be freed.
8155 */
8156 if (!default_bootup_tracer)
8157 return 0;
8158
8159 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8160 default_bootup_tracer);
8161 default_bootup_tracer = NULL;
8162
8163 return 0;
8164}
8165
8434dc93 8166fs_initcall(tracer_init_tracefs);
b2821ae6 8167late_initcall(clear_boot_tracer);