Merge branch 'for-linus-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/mason...
[linux-2.6-block.git] / kernel / trace / trace.c
CommitLineData
bc0c38d1
SR
1/*
2 * ring buffer based function tracer
3 *
2b6080f2 4 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
bc0c38d1
SR
5 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
6d49e352 12 * Copyright (C) 2004 Nadia Yvette Chambers
bc0c38d1 13 */
2cadf913 14#include <linux/ring_buffer.h>
273b281f 15#include <generated/utsrelease.h>
2cadf913
SR
16#include <linux/stacktrace.h>
17#include <linux/writeback.h>
bc0c38d1
SR
18#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
3f5a54e3 20#include <linux/notifier.h>
2cadf913 21#include <linux/irqflags.h>
bc0c38d1 22#include <linux/debugfs.h>
8434dc93 23#include <linux/tracefs.h>
4c11d7ae 24#include <linux/pagemap.h>
bc0c38d1
SR
25#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
2cadf913 28#include <linux/kprobes.h>
bc0c38d1
SR
29#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
2cadf913 32#include <linux/splice.h>
3f5a54e3 33#include <linux/kdebug.h>
5f0c6c03 34#include <linux/string.h>
f76180bc 35#include <linux/mount.h>
7e53bd42 36#include <linux/rwsem.h>
5a0e3ad6 37#include <linux/slab.h>
bc0c38d1
SR
38#include <linux/ctype.h>
39#include <linux/init.h>
2a2cc8f7 40#include <linux/poll.h>
b892e5c8 41#include <linux/nmi.h>
bc0c38d1 42#include <linux/fs.h>
8bd75c77 43#include <linux/sched/rt.h>
86387f7e 44
bc0c38d1 45#include "trace.h"
f0868d1e 46#include "trace_output.h"
bc0c38d1 47
73c5162a
SR
48/*
49 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
55034cd6 52bool ring_buffer_expanded;
73c5162a 53
8e1b82e0
FW
54/*
55 * We need to change this state when a selftest is running.
ff32504f
FW
56 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
5e1607a0 58 * insertions into the ring-buffer such as trace_printk could occurred
ff32504f
FW
59 * at the same time, giving false positive or negative results.
60 */
8e1b82e0 61static bool __read_mostly tracing_selftest_running;
ff32504f 62
b2821ae6
SR
63/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
020e5f85 66bool __read_mostly tracing_selftest_disabled;
b2821ae6 67
0daa2302
SRRH
68/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
adf9f195
FW
72/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
8c1a49ae
SRRH
82static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
adf9f195
FW
84{
85 return 0;
86}
0f048701 87
7ffbd48d
SR
88/*
89 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
0f048701
SR
95/*
96 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
4fd27358 101static int tracing_disabled = 1;
0f048701 102
9288f99a 103DEFINE_PER_CPU(int, ftrace_cpu_disabled);
d769041f 104
955b61e5 105cpumask_var_t __read_mostly tracing_buffer_mask;
ab46428c 106
944ac425
SR
107/*
108 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
109 *
110 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
111 * is set, then ftrace_dump is called. This will output the contents
112 * of the ftrace buffers to the console. This is very useful for
113 * capturing traces that lead to crashes and outputing it to a
114 * serial console.
115 *
116 * It is default off, but you can enable it with either specifying
117 * "ftrace_dump_on_oops" in the kernel command line, or setting
cecbca96
FW
118 * /proc/sys/kernel/ftrace_dump_on_oops
119 * Set 1 if you want to dump buffers of all CPUs
120 * Set 2 if you want to dump the buffer of the CPU that triggered oops
944ac425 121 */
cecbca96
FW
122
123enum ftrace_dump_mode ftrace_dump_on_oops;
944ac425 124
de7edd31
SRRH
125/* When set, tracing will stop when a WARN*() is hit */
126int __disable_trace_on_warning;
127
9828413d
SRRH
128#ifdef CONFIG_TRACE_ENUM_MAP_FILE
129/* Map of enums to their values, for "enum_map" file */
130struct trace_enum_map_head {
131 struct module *mod;
132 unsigned long length;
133};
134
135union trace_enum_map_item;
136
137struct trace_enum_map_tail {
138 /*
139 * "end" is first and points to NULL as it must be different
140 * than "mod" or "enum_string"
141 */
142 union trace_enum_map_item *next;
143 const char *end; /* points to NULL */
144};
145
146static DEFINE_MUTEX(trace_enum_mutex);
147
148/*
149 * The trace_enum_maps are saved in an array with two extra elements,
150 * one at the beginning, and one at the end. The beginning item contains
151 * the count of the saved maps (head.length), and the module they
152 * belong to if not built in (head.mod). The ending item contains a
153 * pointer to the next array of saved enum_map items.
154 */
155union trace_enum_map_item {
156 struct trace_enum_map map;
157 struct trace_enum_map_head head;
158 struct trace_enum_map_tail tail;
159};
160
161static union trace_enum_map_item *trace_enum_maps;
162#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
163
607e2ea1 164static int tracing_set_tracer(struct trace_array *tr, const char *buf);
b2821ae6 165
ee6c2c1b
LZ
166#define MAX_TRACER_SIZE 100
167static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
b2821ae6 168static char *default_bootup_tracer;
d9e54076 169
55034cd6
SRRH
170static bool allocate_snapshot;
171
1beee96b 172static int __init set_cmdline_ftrace(char *str)
d9e54076 173{
67012ab1 174 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
b2821ae6 175 default_bootup_tracer = bootup_tracer_buf;
73c5162a 176 /* We are using ftrace early, expand it */
55034cd6 177 ring_buffer_expanded = true;
d9e54076
PZ
178 return 1;
179}
1beee96b 180__setup("ftrace=", set_cmdline_ftrace);
d9e54076 181
944ac425
SR
182static int __init set_ftrace_dump_on_oops(char *str)
183{
cecbca96
FW
184 if (*str++ != '=' || !*str) {
185 ftrace_dump_on_oops = DUMP_ALL;
186 return 1;
187 }
188
189 if (!strcmp("orig_cpu", str)) {
190 ftrace_dump_on_oops = DUMP_ORIG;
191 return 1;
192 }
193
194 return 0;
944ac425
SR
195}
196__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
60a11774 197
de7edd31
SRRH
198static int __init stop_trace_on_warning(char *str)
199{
933ff9f2
LCG
200 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
201 __disable_trace_on_warning = 1;
de7edd31
SRRH
202 return 1;
203}
933ff9f2 204__setup("traceoff_on_warning", stop_trace_on_warning);
de7edd31 205
3209cff4 206static int __init boot_alloc_snapshot(char *str)
55034cd6
SRRH
207{
208 allocate_snapshot = true;
209 /* We also need the main ring buffer expanded */
210 ring_buffer_expanded = true;
211 return 1;
212}
3209cff4 213__setup("alloc_snapshot", boot_alloc_snapshot);
55034cd6 214
7bcfaf54
SR
215
216static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
217static char *trace_boot_options __initdata;
218
219static int __init set_trace_boot_options(char *str)
220{
67012ab1 221 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
7bcfaf54
SR
222 trace_boot_options = trace_boot_options_buf;
223 return 0;
224}
225__setup("trace_options=", set_trace_boot_options);
226
e1e232ca
SR
227static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
228static char *trace_boot_clock __initdata;
229
230static int __init set_trace_boot_clock(char *str)
231{
232 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
233 trace_boot_clock = trace_boot_clock_buf;
234 return 0;
235}
236__setup("trace_clock=", set_trace_boot_clock);
237
0daa2302
SRRH
238static int __init set_tracepoint_printk(char *str)
239{
240 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
241 tracepoint_printk = 1;
242 return 1;
243}
244__setup("tp_printk", set_tracepoint_printk);
de7edd31 245
cf8e3474 246unsigned long long ns2usecs(cycle_t nsec)
bc0c38d1
SR
247{
248 nsec += 500;
249 do_div(nsec, 1000);
250 return nsec;
251}
252
4fcdae83
SR
253/*
254 * The global_trace is the descriptor that holds the tracing
255 * buffers for the live tracing. For each CPU, it contains
256 * a link list of pages that will store trace entries. The
257 * page descriptor of the pages in the memory is used to hold
258 * the link list by linking the lru item in the page descriptor
259 * to each of the pages in the buffer per CPU.
260 *
261 * For each active CPU there is a data field that holds the
262 * pages for the buffer for that CPU. Each CPU has the same number
263 * of pages allocated for its buffer.
264 */
bc0c38d1
SR
265static struct trace_array global_trace;
266
ae63b31e 267LIST_HEAD(ftrace_trace_arrays);
bc0c38d1 268
ff451961
SRRH
269int trace_array_get(struct trace_array *this_tr)
270{
271 struct trace_array *tr;
272 int ret = -ENODEV;
273
274 mutex_lock(&trace_types_lock);
275 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
276 if (tr == this_tr) {
277 tr->ref++;
278 ret = 0;
279 break;
280 }
281 }
282 mutex_unlock(&trace_types_lock);
283
284 return ret;
285}
286
287static void __trace_array_put(struct trace_array *this_tr)
288{
289 WARN_ON(!this_tr->ref);
290 this_tr->ref--;
291}
292
293void trace_array_put(struct trace_array *this_tr)
294{
295 mutex_lock(&trace_types_lock);
296 __trace_array_put(this_tr);
297 mutex_unlock(&trace_types_lock);
298}
299
f306cc82
TZ
300int filter_check_discard(struct ftrace_event_file *file, void *rec,
301 struct ring_buffer *buffer,
302 struct ring_buffer_event *event)
eb02ce01 303{
f306cc82
TZ
304 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
305 !filter_match_preds(file->filter, rec)) {
306 ring_buffer_discard_commit(buffer, event);
307 return 1;
308 }
309
310 return 0;
311}
312EXPORT_SYMBOL_GPL(filter_check_discard);
313
314int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
315 struct ring_buffer *buffer,
316 struct ring_buffer_event *event)
317{
318 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
319 !filter_match_preds(call->filter, rec)) {
320 ring_buffer_discard_commit(buffer, event);
321 return 1;
322 }
323
324 return 0;
eb02ce01 325}
f306cc82 326EXPORT_SYMBOL_GPL(call_filter_check_discard);
eb02ce01 327
ad1438a0 328static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
37886f6a
SR
329{
330 u64 ts;
331
332 /* Early boot up does not have a buffer yet */
9457158b 333 if (!buf->buffer)
37886f6a
SR
334 return trace_clock_local();
335
9457158b
AL
336 ts = ring_buffer_time_stamp(buf->buffer, cpu);
337 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
37886f6a
SR
338
339 return ts;
340}
bc0c38d1 341
9457158b
AL
342cycle_t ftrace_now(int cpu)
343{
344 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
345}
346
10246fa3
SRRH
347/**
348 * tracing_is_enabled - Show if global_trace has been disabled
349 *
350 * Shows if the global trace has been enabled or not. It uses the
351 * mirror flag "buffer_disabled" to be used in fast paths such as for
352 * the irqsoff tracer. But it may be inaccurate due to races. If you
353 * need to know the accurate state, use tracing_is_on() which is a little
354 * slower, but accurate.
355 */
9036990d
SR
356int tracing_is_enabled(void)
357{
10246fa3
SRRH
358 /*
359 * For quick access (irqsoff uses this in fast path), just
360 * return the mirror variable of the state of the ring buffer.
361 * It's a little racy, but we don't really care.
362 */
363 smp_rmb();
364 return !global_trace.buffer_disabled;
9036990d
SR
365}
366
4fcdae83 367/*
3928a8a2
SR
368 * trace_buf_size is the size in bytes that is allocated
369 * for a buffer. Note, the number of bytes is always rounded
370 * to page size.
3f5a54e3
SR
371 *
372 * This number is purposely set to a low number of 16384.
373 * If the dump on oops happens, it will be much appreciated
374 * to not have to wait for all that output. Anyway this can be
375 * boot time and run time configurable.
4fcdae83 376 */
3928a8a2 377#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
3f5a54e3 378
3928a8a2 379static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
bc0c38d1 380
4fcdae83 381/* trace_types holds a link list of available tracers. */
bc0c38d1 382static struct tracer *trace_types __read_mostly;
4fcdae83 383
4fcdae83
SR
384/*
385 * trace_types_lock is used to protect the trace_types list.
4fcdae83 386 */
a8227415 387DEFINE_MUTEX(trace_types_lock);
4fcdae83 388
7e53bd42
LJ
389/*
390 * serialize the access of the ring buffer
391 *
392 * ring buffer serializes readers, but it is low level protection.
393 * The validity of the events (which returns by ring_buffer_peek() ..etc)
394 * are not protected by ring buffer.
395 *
396 * The content of events may become garbage if we allow other process consumes
397 * these events concurrently:
398 * A) the page of the consumed events may become a normal page
399 * (not reader page) in ring buffer, and this page will be rewrited
400 * by events producer.
401 * B) The page of the consumed events may become a page for splice_read,
402 * and this page will be returned to system.
403 *
404 * These primitives allow multi process access to different cpu ring buffer
405 * concurrently.
406 *
407 * These primitives don't distinguish read-only and read-consume access.
408 * Multi read-only access are also serialized.
409 */
410
411#ifdef CONFIG_SMP
412static DECLARE_RWSEM(all_cpu_access_lock);
413static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
414
415static inline void trace_access_lock(int cpu)
416{
ae3b5093 417 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
418 /* gain it for accessing the whole ring buffer. */
419 down_write(&all_cpu_access_lock);
420 } else {
421 /* gain it for accessing a cpu ring buffer. */
422
ae3b5093 423 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
7e53bd42
LJ
424 down_read(&all_cpu_access_lock);
425
426 /* Secondly block other access to this @cpu ring buffer. */
427 mutex_lock(&per_cpu(cpu_access_lock, cpu));
428 }
429}
430
431static inline void trace_access_unlock(int cpu)
432{
ae3b5093 433 if (cpu == RING_BUFFER_ALL_CPUS) {
7e53bd42
LJ
434 up_write(&all_cpu_access_lock);
435 } else {
436 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
437 up_read(&all_cpu_access_lock);
438 }
439}
440
441static inline void trace_access_lock_init(void)
442{
443 int cpu;
444
445 for_each_possible_cpu(cpu)
446 mutex_init(&per_cpu(cpu_access_lock, cpu));
447}
448
449#else
450
451static DEFINE_MUTEX(access_lock);
452
453static inline void trace_access_lock(int cpu)
454{
455 (void)cpu;
456 mutex_lock(&access_lock);
457}
458
459static inline void trace_access_unlock(int cpu)
460{
461 (void)cpu;
462 mutex_unlock(&access_lock);
463}
464
465static inline void trace_access_lock_init(void)
466{
467}
468
469#endif
470
ee6bce52 471/* trace_flags holds trace_options default values */
12ef7d44 472unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
a2a16d6a 473 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
77271ce4 474 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
328df475 475 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
e7e2ee89 476
5280bcef 477static void tracer_tracing_on(struct trace_array *tr)
10246fa3
SRRH
478{
479 if (tr->trace_buffer.buffer)
480 ring_buffer_record_on(tr->trace_buffer.buffer);
481 /*
482 * This flag is looked at when buffers haven't been allocated
483 * yet, or by some tracers (like irqsoff), that just want to
484 * know if the ring buffer has been disabled, but it can handle
485 * races of where it gets disabled but we still do a record.
486 * As the check is in the fast path of the tracers, it is more
487 * important to be fast than accurate.
488 */
489 tr->buffer_disabled = 0;
490 /* Make the flag seen by readers */
491 smp_wmb();
492}
493
499e5470
SR
494/**
495 * tracing_on - enable tracing buffers
496 *
497 * This function enables tracing buffers that may have been
498 * disabled with tracing_off.
499 */
500void tracing_on(void)
501{
10246fa3 502 tracer_tracing_on(&global_trace);
499e5470
SR
503}
504EXPORT_SYMBOL_GPL(tracing_on);
505
09ae7234
SRRH
506/**
507 * __trace_puts - write a constant string into the trace buffer.
508 * @ip: The address of the caller
509 * @str: The constant string to write
510 * @size: The size of the string.
511 */
512int __trace_puts(unsigned long ip, const char *str, int size)
513{
514 struct ring_buffer_event *event;
515 struct ring_buffer *buffer;
516 struct print_entry *entry;
517 unsigned long irq_flags;
518 int alloc;
8abfb872
J
519 int pc;
520
f0160a5a
J
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
8abfb872 524 pc = preempt_count();
09ae7234 525
3132e107
SRRH
526 if (unlikely(tracing_selftest_running || tracing_disabled))
527 return 0;
528
09ae7234
SRRH
529 alloc = sizeof(*entry) + size + 2; /* possible \n added */
530
531 local_save_flags(irq_flags);
532 buffer = global_trace.trace_buffer.buffer;
533 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
8abfb872 534 irq_flags, pc);
09ae7234
SRRH
535 if (!event)
536 return 0;
537
538 entry = ring_buffer_event_data(event);
539 entry->ip = ip;
540
541 memcpy(&entry->buf, str, size);
542
543 /* Add a newline if necessary */
544 if (entry->buf[size - 1] != '\n') {
545 entry->buf[size] = '\n';
546 entry->buf[size + 1] = '\0';
547 } else
548 entry->buf[size] = '\0';
549
550 __buffer_unlock_commit(buffer, event);
8abfb872 551 ftrace_trace_stack(buffer, irq_flags, 4, pc);
09ae7234
SRRH
552
553 return size;
554}
555EXPORT_SYMBOL_GPL(__trace_puts);
556
557/**
558 * __trace_bputs - write the pointer to a constant string into trace buffer
559 * @ip: The address of the caller
560 * @str: The constant string to write to the buffer to
561 */
562int __trace_bputs(unsigned long ip, const char *str)
563{
564 struct ring_buffer_event *event;
565 struct ring_buffer *buffer;
566 struct bputs_entry *entry;
567 unsigned long irq_flags;
568 int size = sizeof(struct bputs_entry);
8abfb872
J
569 int pc;
570
f0160a5a
J
571 if (!(trace_flags & TRACE_ITER_PRINTK))
572 return 0;
573
8abfb872 574 pc = preempt_count();
09ae7234 575
3132e107
SRRH
576 if (unlikely(tracing_selftest_running || tracing_disabled))
577 return 0;
578
09ae7234
SRRH
579 local_save_flags(irq_flags);
580 buffer = global_trace.trace_buffer.buffer;
581 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
8abfb872 582 irq_flags, pc);
09ae7234
SRRH
583 if (!event)
584 return 0;
585
586 entry = ring_buffer_event_data(event);
587 entry->ip = ip;
588 entry->str = str;
589
590 __buffer_unlock_commit(buffer, event);
8abfb872 591 ftrace_trace_stack(buffer, irq_flags, 4, pc);
09ae7234
SRRH
592
593 return 1;
594}
595EXPORT_SYMBOL_GPL(__trace_bputs);
596
ad909e21
SRRH
597#ifdef CONFIG_TRACER_SNAPSHOT
598/**
599 * trace_snapshot - take a snapshot of the current buffer.
600 *
601 * This causes a swap between the snapshot buffer and the current live
602 * tracing buffer. You can use this to take snapshots of the live
603 * trace when some condition is triggered, but continue to trace.
604 *
605 * Note, make sure to allocate the snapshot with either
606 * a tracing_snapshot_alloc(), or by doing it manually
607 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
608 *
609 * If the snapshot buffer is not allocated, it will stop tracing.
610 * Basically making a permanent snapshot.
611 */
612void tracing_snapshot(void)
613{
614 struct trace_array *tr = &global_trace;
615 struct tracer *tracer = tr->current_trace;
616 unsigned long flags;
617
1b22e382
SRRH
618 if (in_nmi()) {
619 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
620 internal_trace_puts("*** snapshot is being ignored ***\n");
621 return;
622 }
623
ad909e21 624 if (!tr->allocated_snapshot) {
ca268da6
SRRH
625 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
626 internal_trace_puts("*** stopping trace here! ***\n");
ad909e21
SRRH
627 tracing_off();
628 return;
629 }
630
631 /* Note, snapshot can not be used when the tracer uses it */
632 if (tracer->use_max_tr) {
ca268da6
SRRH
633 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
634 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
ad909e21
SRRH
635 return;
636 }
637
638 local_irq_save(flags);
639 update_max_tr(tr, current, smp_processor_id());
640 local_irq_restore(flags);
641}
1b22e382 642EXPORT_SYMBOL_GPL(tracing_snapshot);
ad909e21
SRRH
643
644static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
645 struct trace_buffer *size_buf, int cpu_id);
3209cff4
SRRH
646static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
647
648static int alloc_snapshot(struct trace_array *tr)
649{
650 int ret;
651
652 if (!tr->allocated_snapshot) {
653
654 /* allocate spare buffer */
655 ret = resize_buffer_duplicate_size(&tr->max_buffer,
656 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
657 if (ret < 0)
658 return ret;
659
660 tr->allocated_snapshot = true;
661 }
662
663 return 0;
664}
665
ad1438a0 666static void free_snapshot(struct trace_array *tr)
3209cff4
SRRH
667{
668 /*
669 * We don't free the ring buffer. instead, resize it because
670 * The max_tr ring buffer has some state (e.g. ring->clock) and
671 * we want preserve it.
672 */
673 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
674 set_buffer_entries(&tr->max_buffer, 1);
675 tracing_reset_online_cpus(&tr->max_buffer);
676 tr->allocated_snapshot = false;
677}
ad909e21 678
93e31ffb
TZ
679/**
680 * tracing_alloc_snapshot - allocate snapshot buffer.
681 *
682 * This only allocates the snapshot buffer if it isn't already
683 * allocated - it doesn't also take a snapshot.
684 *
685 * This is meant to be used in cases where the snapshot buffer needs
686 * to be set up for events that can't sleep but need to be able to
687 * trigger a snapshot.
688 */
689int tracing_alloc_snapshot(void)
690{
691 struct trace_array *tr = &global_trace;
692 int ret;
693
694 ret = alloc_snapshot(tr);
695 WARN_ON(ret < 0);
696
697 return ret;
698}
699EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
700
ad909e21
SRRH
701/**
702 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
703 *
704 * This is similar to trace_snapshot(), but it will allocate the
705 * snapshot buffer if it isn't already allocated. Use this only
706 * where it is safe to sleep, as the allocation may sleep.
707 *
708 * This causes a swap between the snapshot buffer and the current live
709 * tracing buffer. You can use this to take snapshots of the live
710 * trace when some condition is triggered, but continue to trace.
711 */
712void tracing_snapshot_alloc(void)
713{
ad909e21
SRRH
714 int ret;
715
93e31ffb
TZ
716 ret = tracing_alloc_snapshot();
717 if (ret < 0)
3209cff4 718 return;
ad909e21
SRRH
719
720 tracing_snapshot();
721}
1b22e382 722EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
723#else
724void tracing_snapshot(void)
725{
726 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
727}
1b22e382 728EXPORT_SYMBOL_GPL(tracing_snapshot);
93e31ffb
TZ
729int tracing_alloc_snapshot(void)
730{
731 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
732 return -ENODEV;
733}
734EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
ad909e21
SRRH
735void tracing_snapshot_alloc(void)
736{
737 /* Give warning */
738 tracing_snapshot();
739}
1b22e382 740EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
ad909e21
SRRH
741#endif /* CONFIG_TRACER_SNAPSHOT */
742
5280bcef 743static void tracer_tracing_off(struct trace_array *tr)
10246fa3
SRRH
744{
745 if (tr->trace_buffer.buffer)
746 ring_buffer_record_off(tr->trace_buffer.buffer);
747 /*
748 * This flag is looked at when buffers haven't been allocated
749 * yet, or by some tracers (like irqsoff), that just want to
750 * know if the ring buffer has been disabled, but it can handle
751 * races of where it gets disabled but we still do a record.
752 * As the check is in the fast path of the tracers, it is more
753 * important to be fast than accurate.
754 */
755 tr->buffer_disabled = 1;
756 /* Make the flag seen by readers */
757 smp_wmb();
758}
759
499e5470
SR
760/**
761 * tracing_off - turn off tracing buffers
762 *
763 * This function stops the tracing buffers from recording data.
764 * It does not disable any overhead the tracers themselves may
765 * be causing. This function simply causes all recording to
766 * the ring buffers to fail.
767 */
768void tracing_off(void)
769{
10246fa3 770 tracer_tracing_off(&global_trace);
499e5470
SR
771}
772EXPORT_SYMBOL_GPL(tracing_off);
773
de7edd31
SRRH
774void disable_trace_on_warning(void)
775{
776 if (__disable_trace_on_warning)
777 tracing_off();
778}
779
10246fa3
SRRH
780/**
781 * tracer_tracing_is_on - show real state of ring buffer enabled
782 * @tr : the trace array to know if ring buffer is enabled
783 *
784 * Shows real state of the ring buffer if it is enabled or not.
785 */
5280bcef 786static int tracer_tracing_is_on(struct trace_array *tr)
10246fa3
SRRH
787{
788 if (tr->trace_buffer.buffer)
789 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
790 return !tr->buffer_disabled;
791}
792
499e5470
SR
793/**
794 * tracing_is_on - show state of ring buffers enabled
795 */
796int tracing_is_on(void)
797{
10246fa3 798 return tracer_tracing_is_on(&global_trace);
499e5470
SR
799}
800EXPORT_SYMBOL_GPL(tracing_is_on);
801
3928a8a2 802static int __init set_buf_size(char *str)
bc0c38d1 803{
3928a8a2 804 unsigned long buf_size;
c6caeeb1 805
bc0c38d1
SR
806 if (!str)
807 return 0;
9d612bef 808 buf_size = memparse(str, &str);
c6caeeb1 809 /* nr_entries can not be zero */
9d612bef 810 if (buf_size == 0)
c6caeeb1 811 return 0;
3928a8a2 812 trace_buf_size = buf_size;
bc0c38d1
SR
813 return 1;
814}
3928a8a2 815__setup("trace_buf_size=", set_buf_size);
bc0c38d1 816
0e950173
TB
817static int __init set_tracing_thresh(char *str)
818{
87abb3b1 819 unsigned long threshold;
0e950173
TB
820 int ret;
821
822 if (!str)
823 return 0;
bcd83ea6 824 ret = kstrtoul(str, 0, &threshold);
0e950173
TB
825 if (ret < 0)
826 return 0;
87abb3b1 827 tracing_thresh = threshold * 1000;
0e950173
TB
828 return 1;
829}
830__setup("tracing_thresh=", set_tracing_thresh);
831
57f50be1
SR
832unsigned long nsecs_to_usecs(unsigned long nsecs)
833{
834 return nsecs / 1000;
835}
836
4fcdae83 837/* These must match the bit postions in trace_iterator_flags */
bc0c38d1
SR
838static const char *trace_options[] = {
839 "print-parent",
840 "sym-offset",
841 "sym-addr",
842 "verbose",
f9896bf3 843 "raw",
5e3ca0ec 844 "hex",
cb0f12aa 845 "bin",
2a2cc8f7 846 "block",
86387f7e 847 "stacktrace",
5e1607a0 848 "trace_printk",
b2a866f9 849 "ftrace_preempt",
9f029e83 850 "branch",
12ef7d44 851 "annotate",
02b67518 852 "userstacktrace",
b54d3de9 853 "sym-userobj",
66896a85 854 "printk-msg-only",
c4a8e8be 855 "context-info",
c032ef64 856 "latency-format",
be6f164a 857 "sleep-time",
a2a16d6a 858 "graph-time",
e870e9a1 859 "record-cmd",
750912fa 860 "overwrite",
cf30cf67 861 "disable_on_free",
77271ce4 862 "irq-info",
5224c3a3 863 "markers",
328df475 864 "function-trace",
bc0c38d1
SR
865 NULL
866};
867
5079f326
Z
868static struct {
869 u64 (*func)(void);
870 const char *name;
8be0709f 871 int in_ns; /* is this clock in nanoseconds? */
5079f326 872} trace_clocks[] = {
1b3e5c09
TG
873 { trace_clock_local, "local", 1 },
874 { trace_clock_global, "global", 1 },
875 { trace_clock_counter, "counter", 0 },
e7fda6c4 876 { trace_clock_jiffies, "uptime", 0 },
1b3e5c09
TG
877 { trace_clock, "perf", 1 },
878 { ktime_get_mono_fast_ns, "mono", 1 },
8cbd9cc6 879 ARCH_TRACE_CLOCKS
5079f326
Z
880};
881
b63f39ea 882/*
883 * trace_parser_get_init - gets the buffer for trace parser
884 */
885int trace_parser_get_init(struct trace_parser *parser, int size)
886{
887 memset(parser, 0, sizeof(*parser));
888
889 parser->buffer = kmalloc(size, GFP_KERNEL);
890 if (!parser->buffer)
891 return 1;
892
893 parser->size = size;
894 return 0;
895}
896
897/*
898 * trace_parser_put - frees the buffer for trace parser
899 */
900void trace_parser_put(struct trace_parser *parser)
901{
902 kfree(parser->buffer);
903}
904
905/*
906 * trace_get_user - reads the user input string separated by space
907 * (matched by isspace(ch))
908 *
909 * For each string found the 'struct trace_parser' is updated,
910 * and the function returns.
911 *
912 * Returns number of bytes read.
913 *
914 * See kernel/trace/trace.h for 'struct trace_parser' details.
915 */
916int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
917 size_t cnt, loff_t *ppos)
918{
919 char ch;
920 size_t read = 0;
921 ssize_t ret;
922
923 if (!*ppos)
924 trace_parser_clear(parser);
925
926 ret = get_user(ch, ubuf++);
927 if (ret)
928 goto out;
929
930 read++;
931 cnt--;
932
933 /*
934 * The parser is not finished with the last write,
935 * continue reading the user input without skipping spaces.
936 */
937 if (!parser->cont) {
938 /* skip white space */
939 while (cnt && isspace(ch)) {
940 ret = get_user(ch, ubuf++);
941 if (ret)
942 goto out;
943 read++;
944 cnt--;
945 }
946
947 /* only spaces were written */
948 if (isspace(ch)) {
949 *ppos += read;
950 ret = read;
951 goto out;
952 }
953
954 parser->idx = 0;
955 }
956
957 /* read the non-space input */
958 while (cnt && !isspace(ch)) {
3c235a33 959 if (parser->idx < parser->size - 1)
b63f39ea 960 parser->buffer[parser->idx++] = ch;
961 else {
962 ret = -EINVAL;
963 goto out;
964 }
965 ret = get_user(ch, ubuf++);
966 if (ret)
967 goto out;
968 read++;
969 cnt--;
970 }
971
972 /* We either got finished input or we have to wait for another call. */
973 if (isspace(ch)) {
974 parser->buffer[parser->idx] = 0;
975 parser->cont = false;
057db848 976 } else if (parser->idx < parser->size - 1) {
b63f39ea 977 parser->cont = true;
978 parser->buffer[parser->idx++] = ch;
057db848
SR
979 } else {
980 ret = -EINVAL;
981 goto out;
b63f39ea 982 }
983
984 *ppos += read;
985 ret = read;
986
987out:
988 return ret;
989}
990
3a161d99 991/* TODO add a seq_buf_to_buffer() */
b8b94265 992static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
3c56819b
EGM
993{
994 int len;
3c56819b 995
5ac48378 996 if (trace_seq_used(s) <= s->seq.readpos)
3c56819b
EGM
997 return -EBUSY;
998
5ac48378 999 len = trace_seq_used(s) - s->seq.readpos;
3c56819b
EGM
1000 if (cnt > len)
1001 cnt = len;
3a161d99 1002 memcpy(buf, s->buffer + s->seq.readpos, cnt);
3c56819b 1003
3a161d99 1004 s->seq.readpos += cnt;
3c56819b
EGM
1005 return cnt;
1006}
1007
0e950173
TB
1008unsigned long __read_mostly tracing_thresh;
1009
5d4a9dba 1010#ifdef CONFIG_TRACER_MAX_TRACE
5d4a9dba
SR
1011/*
1012 * Copy the new maximum trace into the separate maximum-trace
1013 * structure. (this way the maximum trace is permanently saved,
1014 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1015 */
1016static void
1017__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1018{
12883efb
SRRH
1019 struct trace_buffer *trace_buf = &tr->trace_buffer;
1020 struct trace_buffer *max_buf = &tr->max_buffer;
1021 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1022 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
5d4a9dba 1023
12883efb
SRRH
1024 max_buf->cpu = cpu;
1025 max_buf->time_start = data->preempt_timestamp;
5d4a9dba 1026
6d9b3fa5 1027 max_data->saved_latency = tr->max_latency;
8248ac05
SR
1028 max_data->critical_start = data->critical_start;
1029 max_data->critical_end = data->critical_end;
5d4a9dba 1030
1acaa1b2 1031 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
8248ac05 1032 max_data->pid = tsk->pid;
f17a5194
SRRH
1033 /*
1034 * If tsk == current, then use current_uid(), as that does not use
1035 * RCU. The irq tracer can be called out of RCU scope.
1036 */
1037 if (tsk == current)
1038 max_data->uid = current_uid();
1039 else
1040 max_data->uid = task_uid(tsk);
1041
8248ac05
SR
1042 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1043 max_data->policy = tsk->policy;
1044 max_data->rt_priority = tsk->rt_priority;
5d4a9dba
SR
1045
1046 /* record this tasks comm */
1047 tracing_record_cmdline(tsk);
1048}
1049
4fcdae83
SR
1050/**
1051 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1052 * @tr: tracer
1053 * @tsk: the task with the latency
1054 * @cpu: The cpu that initiated the trace.
1055 *
1056 * Flip the buffers between the @tr and the max_tr and record information
1057 * about which task was the cause of this latency.
1058 */
e309b41d 1059void
bc0c38d1
SR
1060update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1061{
2721e72d 1062 struct ring_buffer *buf;
bc0c38d1 1063
2b6080f2 1064 if (tr->stop_count)
b8de7bd1
SR
1065 return;
1066
4c11d7ae 1067 WARN_ON_ONCE(!irqs_disabled());
34600f0e 1068
45ad21ca 1069 if (!tr->allocated_snapshot) {
debdd57f 1070 /* Only the nop tracer should hit this when disabling */
2b6080f2 1071 WARN_ON_ONCE(tr->current_trace != &nop_trace);
34600f0e 1072 return;
debdd57f 1073 }
34600f0e 1074
0b9b12c1 1075 arch_spin_lock(&tr->max_lock);
3928a8a2 1076
12883efb
SRRH
1077 buf = tr->trace_buffer.buffer;
1078 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1079 tr->max_buffer.buffer = buf;
3928a8a2 1080
bc0c38d1 1081 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1082 arch_spin_unlock(&tr->max_lock);
bc0c38d1
SR
1083}
1084
1085/**
1086 * update_max_tr_single - only copy one trace over, and reset the rest
1087 * @tr - tracer
1088 * @tsk - task with the latency
1089 * @cpu - the cpu of the buffer to copy.
4fcdae83
SR
1090 *
1091 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
bc0c38d1 1092 */
e309b41d 1093void
bc0c38d1
SR
1094update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1095{
3928a8a2 1096 int ret;
bc0c38d1 1097
2b6080f2 1098 if (tr->stop_count)
b8de7bd1
SR
1099 return;
1100
4c11d7ae 1101 WARN_ON_ONCE(!irqs_disabled());
6c24499f 1102 if (!tr->allocated_snapshot) {
2930e04d 1103 /* Only the nop tracer should hit this when disabling */
9e8529af 1104 WARN_ON_ONCE(tr->current_trace != &nop_trace);
ef710e10 1105 return;
2930e04d 1106 }
ef710e10 1107
0b9b12c1 1108 arch_spin_lock(&tr->max_lock);
bc0c38d1 1109
12883efb 1110 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
3928a8a2 1111
e8165dbb
SR
1112 if (ret == -EBUSY) {
1113 /*
1114 * We failed to swap the buffer due to a commit taking
1115 * place on this CPU. We fail to record, but we reset
1116 * the max trace buffer (no one writes directly to it)
1117 * and flag that it failed.
1118 */
12883efb 1119 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
e8165dbb
SR
1120 "Failed to swap buffers due to commit in progress\n");
1121 }
1122
e8165dbb 1123 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
bc0c38d1
SR
1124
1125 __update_max_tr(tr, tsk, cpu);
0b9b12c1 1126 arch_spin_unlock(&tr->max_lock);
bc0c38d1 1127}
5d4a9dba 1128#endif /* CONFIG_TRACER_MAX_TRACE */
bc0c38d1 1129
e30f53aa 1130static int wait_on_pipe(struct trace_iterator *iter, bool full)
0d5c6e1c 1131{
15693458
SRRH
1132 /* Iterators are static, they should be filled or empty */
1133 if (trace_buffer_iter(iter, iter->cpu_file))
8b8b3683 1134 return 0;
0d5c6e1c 1135
e30f53aa
RV
1136 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1137 full);
0d5c6e1c
SR
1138}
1139
f4e781c0
SRRH
1140#ifdef CONFIG_FTRACE_STARTUP_TEST
1141static int run_tracer_selftest(struct tracer *type)
1142{
1143 struct trace_array *tr = &global_trace;
1144 struct tracer *saved_tracer = tr->current_trace;
1145 int ret;
0d5c6e1c 1146
f4e781c0
SRRH
1147 if (!type->selftest || tracing_selftest_disabled)
1148 return 0;
0d5c6e1c
SR
1149
1150 /*
f4e781c0
SRRH
1151 * Run a selftest on this tracer.
1152 * Here we reset the trace buffer, and set the current
1153 * tracer to be this tracer. The tracer can then run some
1154 * internal tracing to verify that everything is in order.
1155 * If we fail, we do not register this tracer.
0d5c6e1c 1156 */
f4e781c0 1157 tracing_reset_online_cpus(&tr->trace_buffer);
0d5c6e1c 1158
f4e781c0
SRRH
1159 tr->current_trace = type;
1160
1161#ifdef CONFIG_TRACER_MAX_TRACE
1162 if (type->use_max_tr) {
1163 /* If we expanded the buffers, make sure the max is expanded too */
1164 if (ring_buffer_expanded)
1165 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1166 RING_BUFFER_ALL_CPUS);
1167 tr->allocated_snapshot = true;
1168 }
1169#endif
1170
1171 /* the test is responsible for initializing and enabling */
1172 pr_info("Testing tracer %s: ", type->name);
1173 ret = type->selftest(type, tr);
1174 /* the test is responsible for resetting too */
1175 tr->current_trace = saved_tracer;
1176 if (ret) {
1177 printk(KERN_CONT "FAILED!\n");
1178 /* Add the warning after printing 'FAILED' */
1179 WARN_ON(1);
1180 return -1;
1181 }
1182 /* Only reset on passing, to avoid touching corrupted buffers */
1183 tracing_reset_online_cpus(&tr->trace_buffer);
1184
1185#ifdef CONFIG_TRACER_MAX_TRACE
1186 if (type->use_max_tr) {
1187 tr->allocated_snapshot = false;
0d5c6e1c 1188
f4e781c0
SRRH
1189 /* Shrink the max buffer again */
1190 if (ring_buffer_expanded)
1191 ring_buffer_resize(tr->max_buffer.buffer, 1,
1192 RING_BUFFER_ALL_CPUS);
1193 }
1194#endif
1195
1196 printk(KERN_CONT "PASSED\n");
1197 return 0;
1198}
1199#else
1200static inline int run_tracer_selftest(struct tracer *type)
1201{
1202 return 0;
0d5c6e1c 1203}
f4e781c0 1204#endif /* CONFIG_FTRACE_STARTUP_TEST */
0d5c6e1c 1205
4fcdae83
SR
1206/**
1207 * register_tracer - register a tracer with the ftrace system.
1208 * @type - the plugin for the tracer
1209 *
1210 * Register a new plugin tracer.
1211 */
bc0c38d1
SR
1212int register_tracer(struct tracer *type)
1213{
1214 struct tracer *t;
bc0c38d1
SR
1215 int ret = 0;
1216
1217 if (!type->name) {
1218 pr_info("Tracer must have a name\n");
1219 return -1;
1220 }
1221
24a461d5 1222 if (strlen(type->name) >= MAX_TRACER_SIZE) {
ee6c2c1b
LZ
1223 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1224 return -1;
1225 }
1226
bc0c38d1 1227 mutex_lock(&trace_types_lock);
86fa2f60 1228
8e1b82e0
FW
1229 tracing_selftest_running = true;
1230
bc0c38d1
SR
1231 for (t = trace_types; t; t = t->next) {
1232 if (strcmp(type->name, t->name) == 0) {
1233 /* already found */
ee6c2c1b 1234 pr_info("Tracer %s already registered\n",
bc0c38d1
SR
1235 type->name);
1236 ret = -1;
1237 goto out;
1238 }
1239 }
1240
adf9f195
FW
1241 if (!type->set_flag)
1242 type->set_flag = &dummy_set_flag;
1243 if (!type->flags)
1244 type->flags = &dummy_tracer_flags;
1245 else
1246 if (!type->flags->opts)
1247 type->flags->opts = dummy_tracer_opt;
6eaaa5d5 1248
f4e781c0
SRRH
1249 ret = run_tracer_selftest(type);
1250 if (ret < 0)
1251 goto out;
60a11774 1252
bc0c38d1
SR
1253 type->next = trace_types;
1254 trace_types = type;
60a11774 1255
bc0c38d1 1256 out:
8e1b82e0 1257 tracing_selftest_running = false;
bc0c38d1
SR
1258 mutex_unlock(&trace_types_lock);
1259
dac74940
SR
1260 if (ret || !default_bootup_tracer)
1261 goto out_unlock;
1262
ee6c2c1b 1263 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
dac74940
SR
1264 goto out_unlock;
1265
1266 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1267 /* Do we want this tracer to start on bootup? */
607e2ea1 1268 tracing_set_tracer(&global_trace, type->name);
dac74940
SR
1269 default_bootup_tracer = NULL;
1270 /* disable other selftests, since this will break it. */
55034cd6 1271 tracing_selftest_disabled = true;
b2821ae6 1272#ifdef CONFIG_FTRACE_STARTUP_TEST
dac74940
SR
1273 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1274 type->name);
b2821ae6 1275#endif
b2821ae6 1276
dac74940 1277 out_unlock:
bc0c38d1
SR
1278 return ret;
1279}
1280
12883efb 1281void tracing_reset(struct trace_buffer *buf, int cpu)
f633903a 1282{
12883efb 1283 struct ring_buffer *buffer = buf->buffer;
f633903a 1284
a5416411
HT
1285 if (!buffer)
1286 return;
1287
f633903a
SR
1288 ring_buffer_record_disable(buffer);
1289
1290 /* Make sure all commits have finished */
1291 synchronize_sched();
68179686 1292 ring_buffer_reset_cpu(buffer, cpu);
f633903a
SR
1293
1294 ring_buffer_record_enable(buffer);
1295}
1296
12883efb 1297void tracing_reset_online_cpus(struct trace_buffer *buf)
213cc060 1298{
12883efb 1299 struct ring_buffer *buffer = buf->buffer;
213cc060
PE
1300 int cpu;
1301
a5416411
HT
1302 if (!buffer)
1303 return;
1304
621968cd
SR
1305 ring_buffer_record_disable(buffer);
1306
1307 /* Make sure all commits have finished */
1308 synchronize_sched();
1309
9457158b 1310 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
213cc060
PE
1311
1312 for_each_online_cpu(cpu)
68179686 1313 ring_buffer_reset_cpu(buffer, cpu);
621968cd
SR
1314
1315 ring_buffer_record_enable(buffer);
213cc060
PE
1316}
1317
09d8091c 1318/* Must have trace_types_lock held */
873c642f 1319void tracing_reset_all_online_cpus(void)
9456f0fa 1320{
873c642f
SRRH
1321 struct trace_array *tr;
1322
873c642f 1323 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
12883efb
SRRH
1324 tracing_reset_online_cpus(&tr->trace_buffer);
1325#ifdef CONFIG_TRACER_MAX_TRACE
1326 tracing_reset_online_cpus(&tr->max_buffer);
1327#endif
873c642f 1328 }
9456f0fa
SR
1329}
1330
939c7a4f 1331#define SAVED_CMDLINES_DEFAULT 128
2c7eea4c 1332#define NO_CMDLINE_MAP UINT_MAX
edc35bd7 1333static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
939c7a4f
YY
1334struct saved_cmdlines_buffer {
1335 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1336 unsigned *map_cmdline_to_pid;
1337 unsigned cmdline_num;
1338 int cmdline_idx;
1339 char *saved_cmdlines;
1340};
1341static struct saved_cmdlines_buffer *savedcmd;
25b0b44a 1342
25b0b44a 1343/* temporary disable recording */
4fd27358 1344static atomic_t trace_record_cmdline_disabled __read_mostly;
bc0c38d1 1345
939c7a4f
YY
1346static inline char *get_saved_cmdlines(int idx)
1347{
1348 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1349}
1350
1351static inline void set_cmdline(int idx, const char *cmdline)
bc0c38d1 1352{
939c7a4f
YY
1353 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1354}
1355
1356static int allocate_cmdlines_buffer(unsigned int val,
1357 struct saved_cmdlines_buffer *s)
1358{
1359 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1360 GFP_KERNEL);
1361 if (!s->map_cmdline_to_pid)
1362 return -ENOMEM;
1363
1364 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1365 if (!s->saved_cmdlines) {
1366 kfree(s->map_cmdline_to_pid);
1367 return -ENOMEM;
1368 }
1369
1370 s->cmdline_idx = 0;
1371 s->cmdline_num = val;
1372 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1373 sizeof(s->map_pid_to_cmdline));
1374 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1375 val * sizeof(*s->map_cmdline_to_pid));
1376
1377 return 0;
1378}
1379
1380static int trace_create_savedcmd(void)
1381{
1382 int ret;
1383
a6af8fbf 1384 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
939c7a4f
YY
1385 if (!savedcmd)
1386 return -ENOMEM;
1387
1388 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1389 if (ret < 0) {
1390 kfree(savedcmd);
1391 savedcmd = NULL;
1392 return -ENOMEM;
1393 }
1394
1395 return 0;
bc0c38d1
SR
1396}
1397
b5130b1e
CE
1398int is_tracing_stopped(void)
1399{
2b6080f2 1400 return global_trace.stop_count;
b5130b1e
CE
1401}
1402
0f048701
SR
1403/**
1404 * tracing_start - quick start of the tracer
1405 *
1406 * If tracing is enabled but was stopped by tracing_stop,
1407 * this will start the tracer back up.
1408 */
1409void tracing_start(void)
1410{
1411 struct ring_buffer *buffer;
1412 unsigned long flags;
1413
1414 if (tracing_disabled)
1415 return;
1416
2b6080f2
SR
1417 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1418 if (--global_trace.stop_count) {
1419 if (global_trace.stop_count < 0) {
b06a8301
SR
1420 /* Someone screwed up their debugging */
1421 WARN_ON_ONCE(1);
2b6080f2 1422 global_trace.stop_count = 0;
b06a8301 1423 }
0f048701
SR
1424 goto out;
1425 }
1426
a2f80714 1427 /* Prevent the buffers from switching */
0b9b12c1 1428 arch_spin_lock(&global_trace.max_lock);
0f048701 1429
12883efb 1430 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1431 if (buffer)
1432 ring_buffer_record_enable(buffer);
1433
12883efb
SRRH
1434#ifdef CONFIG_TRACER_MAX_TRACE
1435 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1436 if (buffer)
1437 ring_buffer_record_enable(buffer);
12883efb 1438#endif
0f048701 1439
0b9b12c1 1440 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1441
0f048701 1442 out:
2b6080f2
SR
1443 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1444}
1445
1446static void tracing_start_tr(struct trace_array *tr)
1447{
1448 struct ring_buffer *buffer;
1449 unsigned long flags;
1450
1451 if (tracing_disabled)
1452 return;
1453
1454 /* If global, we need to also start the max tracer */
1455 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1456 return tracing_start();
1457
1458 raw_spin_lock_irqsave(&tr->start_lock, flags);
1459
1460 if (--tr->stop_count) {
1461 if (tr->stop_count < 0) {
1462 /* Someone screwed up their debugging */
1463 WARN_ON_ONCE(1);
1464 tr->stop_count = 0;
1465 }
1466 goto out;
1467 }
1468
12883efb 1469 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1470 if (buffer)
1471 ring_buffer_record_enable(buffer);
1472
1473 out:
1474 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1475}
1476
1477/**
1478 * tracing_stop - quick stop of the tracer
1479 *
1480 * Light weight way to stop tracing. Use in conjunction with
1481 * tracing_start.
1482 */
1483void tracing_stop(void)
1484{
1485 struct ring_buffer *buffer;
1486 unsigned long flags;
1487
2b6080f2
SR
1488 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1489 if (global_trace.stop_count++)
0f048701
SR
1490 goto out;
1491
a2f80714 1492 /* Prevent the buffers from switching */
0b9b12c1 1493 arch_spin_lock(&global_trace.max_lock);
a2f80714 1494
12883efb 1495 buffer = global_trace.trace_buffer.buffer;
0f048701
SR
1496 if (buffer)
1497 ring_buffer_record_disable(buffer);
1498
12883efb
SRRH
1499#ifdef CONFIG_TRACER_MAX_TRACE
1500 buffer = global_trace.max_buffer.buffer;
0f048701
SR
1501 if (buffer)
1502 ring_buffer_record_disable(buffer);
12883efb 1503#endif
0f048701 1504
0b9b12c1 1505 arch_spin_unlock(&global_trace.max_lock);
a2f80714 1506
0f048701 1507 out:
2b6080f2
SR
1508 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1509}
1510
1511static void tracing_stop_tr(struct trace_array *tr)
1512{
1513 struct ring_buffer *buffer;
1514 unsigned long flags;
1515
1516 /* If global, we need to also stop the max tracer */
1517 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1518 return tracing_stop();
1519
1520 raw_spin_lock_irqsave(&tr->start_lock, flags);
1521 if (tr->stop_count++)
1522 goto out;
1523
12883efb 1524 buffer = tr->trace_buffer.buffer;
2b6080f2
SR
1525 if (buffer)
1526 ring_buffer_record_disable(buffer);
1527
1528 out:
1529 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
0f048701
SR
1530}
1531
e309b41d 1532void trace_stop_cmdline_recording(void);
bc0c38d1 1533
379cfdac 1534static int trace_save_cmdline(struct task_struct *tsk)
bc0c38d1 1535{
a635cf04 1536 unsigned pid, idx;
bc0c38d1
SR
1537
1538 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
379cfdac 1539 return 0;
bc0c38d1
SR
1540
1541 /*
1542 * It's not the end of the world if we don't get
1543 * the lock, but we also don't want to spin
1544 * nor do we want to disable interrupts,
1545 * so if we miss here, then better luck next time.
1546 */
0199c4e6 1547 if (!arch_spin_trylock(&trace_cmdline_lock))
379cfdac 1548 return 0;
bc0c38d1 1549
939c7a4f 1550 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
2c7eea4c 1551 if (idx == NO_CMDLINE_MAP) {
939c7a4f 1552 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
bc0c38d1 1553
a635cf04
CE
1554 /*
1555 * Check whether the cmdline buffer at idx has a pid
1556 * mapped. We are going to overwrite that entry so we
1557 * need to clear the map_pid_to_cmdline. Otherwise we
1558 * would read the new comm for the old pid.
1559 */
939c7a4f 1560 pid = savedcmd->map_cmdline_to_pid[idx];
a635cf04 1561 if (pid != NO_CMDLINE_MAP)
939c7a4f 1562 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
bc0c38d1 1563
939c7a4f
YY
1564 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1565 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
bc0c38d1 1566
939c7a4f 1567 savedcmd->cmdline_idx = idx;
bc0c38d1
SR
1568 }
1569
939c7a4f 1570 set_cmdline(idx, tsk->comm);
bc0c38d1 1571
0199c4e6 1572 arch_spin_unlock(&trace_cmdline_lock);
379cfdac
SRRH
1573
1574 return 1;
bc0c38d1
SR
1575}
1576
4c27e756 1577static void __trace_find_cmdline(int pid, char comm[])
bc0c38d1 1578{
bc0c38d1
SR
1579 unsigned map;
1580
4ca53085
SR
1581 if (!pid) {
1582 strcpy(comm, "<idle>");
1583 return;
1584 }
bc0c38d1 1585
74bf4076
SR
1586 if (WARN_ON_ONCE(pid < 0)) {
1587 strcpy(comm, "<XXX>");
1588 return;
1589 }
1590
4ca53085
SR
1591 if (pid > PID_MAX_DEFAULT) {
1592 strcpy(comm, "<...>");
1593 return;
1594 }
bc0c38d1 1595
939c7a4f 1596 map = savedcmd->map_pid_to_cmdline[pid];
50d88758 1597 if (map != NO_CMDLINE_MAP)
939c7a4f 1598 strcpy(comm, get_saved_cmdlines(map));
50d88758
TG
1599 else
1600 strcpy(comm, "<...>");
4c27e756
SRRH
1601}
1602
1603void trace_find_cmdline(int pid, char comm[])
1604{
1605 preempt_disable();
1606 arch_spin_lock(&trace_cmdline_lock);
1607
1608 __trace_find_cmdline(pid, comm);
bc0c38d1 1609
0199c4e6 1610 arch_spin_unlock(&trace_cmdline_lock);
5b6045a9 1611 preempt_enable();
bc0c38d1
SR
1612}
1613
e309b41d 1614void tracing_record_cmdline(struct task_struct *tsk)
bc0c38d1 1615{
0fb9656d 1616 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
bc0c38d1
SR
1617 return;
1618
7ffbd48d
SR
1619 if (!__this_cpu_read(trace_cmdline_save))
1620 return;
1621
379cfdac
SRRH
1622 if (trace_save_cmdline(tsk))
1623 __this_cpu_write(trace_cmdline_save, false);
bc0c38d1
SR
1624}
1625
45dcd8b8 1626void
38697053
SR
1627tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1628 int pc)
bc0c38d1
SR
1629{
1630 struct task_struct *tsk = current;
bc0c38d1 1631
777e208d
SR
1632 entry->preempt_count = pc & 0xff;
1633 entry->pid = (tsk) ? tsk->pid : 0;
1634 entry->flags =
9244489a 1635#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2e2ca155 1636 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
9244489a
SR
1637#else
1638 TRACE_FLAG_IRQS_NOSUPPORT |
1639#endif
bc0c38d1
SR
1640 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1641 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
e5137b50
PZ
1642 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1643 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
bc0c38d1 1644}
f413cdb8 1645EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
bc0c38d1 1646
e77405ad
SR
1647struct ring_buffer_event *
1648trace_buffer_lock_reserve(struct ring_buffer *buffer,
1649 int type,
1650 unsigned long len,
1651 unsigned long flags, int pc)
51a763dd
ACM
1652{
1653 struct ring_buffer_event *event;
1654
e77405ad 1655 event = ring_buffer_lock_reserve(buffer, len);
51a763dd
ACM
1656 if (event != NULL) {
1657 struct trace_entry *ent = ring_buffer_event_data(event);
1658
1659 tracing_generic_entry_update(ent, flags, pc);
1660 ent->type = type;
1661 }
1662
1663 return event;
1664}
51a763dd 1665
7ffbd48d
SR
1666void
1667__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1668{
1669 __this_cpu_write(trace_cmdline_save, true);
1670 ring_buffer_unlock_commit(buffer, event);
1671}
1672
e77405ad
SR
1673static inline void
1674__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1675 struct ring_buffer_event *event,
0d5c6e1c 1676 unsigned long flags, int pc)
51a763dd 1677{
7ffbd48d 1678 __buffer_unlock_commit(buffer, event);
51a763dd 1679
e77405ad
SR
1680 ftrace_trace_stack(buffer, flags, 6, pc);
1681 ftrace_trace_userstack(buffer, flags, pc);
07edf712
FW
1682}
1683
e77405ad
SR
1684void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1685 struct ring_buffer_event *event,
1686 unsigned long flags, int pc)
07edf712 1687{
0d5c6e1c 1688 __trace_buffer_unlock_commit(buffer, event, flags, pc);
51a763dd 1689}
0d5c6e1c 1690EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
51a763dd 1691
2c4a33ab
SRRH
1692static struct ring_buffer *temp_buffer;
1693
ccb469a1
SR
1694struct ring_buffer_event *
1695trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1696 struct ftrace_event_file *ftrace_file,
1697 int type, unsigned long len,
1698 unsigned long flags, int pc)
1699{
2c4a33ab
SRRH
1700 struct ring_buffer_event *entry;
1701
12883efb 1702 *current_rb = ftrace_file->tr->trace_buffer.buffer;
2c4a33ab 1703 entry = trace_buffer_lock_reserve(*current_rb,
ccb469a1 1704 type, len, flags, pc);
2c4a33ab
SRRH
1705 /*
1706 * If tracing is off, but we have triggers enabled
1707 * we still need to look at the event data. Use the temp_buffer
1708 * to store the trace event for the tigger to use. It's recusive
1709 * safe and will not be recorded anywhere.
1710 */
1711 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1712 *current_rb = temp_buffer;
1713 entry = trace_buffer_lock_reserve(*current_rb,
1714 type, len, flags, pc);
1715 }
1716 return entry;
ccb469a1
SR
1717}
1718EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1719
ef5580d0 1720struct ring_buffer_event *
e77405ad
SR
1721trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1722 int type, unsigned long len,
ef5580d0
SR
1723 unsigned long flags, int pc)
1724{
12883efb 1725 *current_rb = global_trace.trace_buffer.buffer;
e77405ad 1726 return trace_buffer_lock_reserve(*current_rb,
ef5580d0
SR
1727 type, len, flags, pc);
1728}
94487d6d 1729EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
ef5580d0 1730
e77405ad
SR
1731void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1732 struct ring_buffer_event *event,
ef5580d0
SR
1733 unsigned long flags, int pc)
1734{
0d5c6e1c 1735 __trace_buffer_unlock_commit(buffer, event, flags, pc);
07edf712 1736}
94487d6d 1737EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
07edf712 1738
0d5c6e1c
SR
1739void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1740 struct ring_buffer_event *event,
1741 unsigned long flags, int pc,
1742 struct pt_regs *regs)
1fd8df2c 1743{
7ffbd48d 1744 __buffer_unlock_commit(buffer, event);
1fd8df2c
MH
1745
1746 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1747 ftrace_trace_userstack(buffer, flags, pc);
1748}
0d5c6e1c 1749EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
1fd8df2c 1750
e77405ad
SR
1751void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1752 struct ring_buffer_event *event)
77d9f465 1753{
e77405ad 1754 ring_buffer_discard_commit(buffer, event);
ef5580d0 1755}
12acd473 1756EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
ef5580d0 1757
e309b41d 1758void
7be42151 1759trace_function(struct trace_array *tr,
38697053
SR
1760 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1761 int pc)
bc0c38d1 1762{
e1112b4d 1763 struct ftrace_event_call *call = &event_function;
12883efb 1764 struct ring_buffer *buffer = tr->trace_buffer.buffer;
3928a8a2 1765 struct ring_buffer_event *event;
777e208d 1766 struct ftrace_entry *entry;
bc0c38d1 1767
d769041f 1768 /* If we are reading the ring buffer, don't trace */
dd17c8f7 1769 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
d769041f
SR
1770 return;
1771
e77405ad 1772 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
51a763dd 1773 flags, pc);
3928a8a2
SR
1774 if (!event)
1775 return;
1776 entry = ring_buffer_event_data(event);
777e208d
SR
1777 entry->ip = ip;
1778 entry->parent_ip = parent_ip;
e1112b4d 1779
f306cc82 1780 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1781 __buffer_unlock_commit(buffer, event);
bc0c38d1
SR
1782}
1783
c0a0d0d3 1784#ifdef CONFIG_STACKTRACE
4a9bd3f1
SR
1785
1786#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1787struct ftrace_stack {
1788 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1789};
1790
1791static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1792static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1793
e77405ad 1794static void __ftrace_trace_stack(struct ring_buffer *buffer,
53614991 1795 unsigned long flags,
1fd8df2c 1796 int skip, int pc, struct pt_regs *regs)
86387f7e 1797{
e1112b4d 1798 struct ftrace_event_call *call = &event_kernel_stack;
3928a8a2 1799 struct ring_buffer_event *event;
777e208d 1800 struct stack_entry *entry;
86387f7e 1801 struct stack_trace trace;
4a9bd3f1
SR
1802 int use_stack;
1803 int size = FTRACE_STACK_ENTRIES;
1804
1805 trace.nr_entries = 0;
1806 trace.skip = skip;
1807
1808 /*
1809 * Since events can happen in NMIs there's no safe way to
1810 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1811 * or NMI comes in, it will just have to use the default
1812 * FTRACE_STACK_SIZE.
1813 */
1814 preempt_disable_notrace();
1815
82146529 1816 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
4a9bd3f1
SR
1817 /*
1818 * We don't need any atomic variables, just a barrier.
1819 * If an interrupt comes in, we don't care, because it would
1820 * have exited and put the counter back to what we want.
1821 * We just need a barrier to keep gcc from moving things
1822 * around.
1823 */
1824 barrier();
1825 if (use_stack == 1) {
bdffd893 1826 trace.entries = this_cpu_ptr(ftrace_stack.calls);
4a9bd3f1
SR
1827 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1828
1829 if (regs)
1830 save_stack_trace_regs(regs, &trace);
1831 else
1832 save_stack_trace(&trace);
1833
1834 if (trace.nr_entries > size)
1835 size = trace.nr_entries;
1836 } else
1837 /* From now on, use_stack is a boolean */
1838 use_stack = 0;
1839
1840 size *= sizeof(unsigned long);
86387f7e 1841
e77405ad 1842 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
4a9bd3f1 1843 sizeof(*entry) + size, flags, pc);
3928a8a2 1844 if (!event)
4a9bd3f1
SR
1845 goto out;
1846 entry = ring_buffer_event_data(event);
86387f7e 1847
4a9bd3f1
SR
1848 memset(&entry->caller, 0, size);
1849
1850 if (use_stack)
1851 memcpy(&entry->caller, trace.entries,
1852 trace.nr_entries * sizeof(unsigned long));
1853 else {
1854 trace.max_entries = FTRACE_STACK_ENTRIES;
1855 trace.entries = entry->caller;
1856 if (regs)
1857 save_stack_trace_regs(regs, &trace);
1858 else
1859 save_stack_trace(&trace);
1860 }
1861
1862 entry->size = trace.nr_entries;
86387f7e 1863
f306cc82 1864 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1865 __buffer_unlock_commit(buffer, event);
4a9bd3f1
SR
1866
1867 out:
1868 /* Again, don't let gcc optimize things here */
1869 barrier();
82146529 1870 __this_cpu_dec(ftrace_stack_reserve);
4a9bd3f1
SR
1871 preempt_enable_notrace();
1872
f0a920d5
IM
1873}
1874
1fd8df2c
MH
1875void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1876 int skip, int pc, struct pt_regs *regs)
1877{
1878 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1879 return;
1880
1881 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1882}
1883
e77405ad
SR
1884void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1885 int skip, int pc)
53614991
SR
1886{
1887 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1888 return;
1889
1fd8df2c 1890 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
53614991
SR
1891}
1892
c0a0d0d3
FW
1893void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1894 int pc)
38697053 1895{
12883efb 1896 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
38697053
SR
1897}
1898
03889384
SR
1899/**
1900 * trace_dump_stack - record a stack back trace in the trace buffer
c142be8e 1901 * @skip: Number of functions to skip (helper handlers)
03889384 1902 */
c142be8e 1903void trace_dump_stack(int skip)
03889384
SR
1904{
1905 unsigned long flags;
1906
1907 if (tracing_disabled || tracing_selftest_running)
e36c5458 1908 return;
03889384
SR
1909
1910 local_save_flags(flags);
1911
c142be8e
SRRH
1912 /*
1913 * Skip 3 more, seems to get us at the caller of
1914 * this function.
1915 */
1916 skip += 3;
1917 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1918 flags, skip, preempt_count(), NULL);
03889384
SR
1919}
1920
91e86e56
SR
1921static DEFINE_PER_CPU(int, user_stack_count);
1922
e77405ad
SR
1923void
1924ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
02b67518 1925{
e1112b4d 1926 struct ftrace_event_call *call = &event_user_stack;
8d7c6a96 1927 struct ring_buffer_event *event;
02b67518
TE
1928 struct userstack_entry *entry;
1929 struct stack_trace trace;
02b67518
TE
1930
1931 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1932 return;
1933
b6345879
SR
1934 /*
1935 * NMIs can not handle page faults, even with fix ups.
1936 * The save user stack can (and often does) fault.
1937 */
1938 if (unlikely(in_nmi()))
1939 return;
02b67518 1940
91e86e56
SR
1941 /*
1942 * prevent recursion, since the user stack tracing may
1943 * trigger other kernel events.
1944 */
1945 preempt_disable();
1946 if (__this_cpu_read(user_stack_count))
1947 goto out;
1948
1949 __this_cpu_inc(user_stack_count);
1950
e77405ad 1951 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
51a763dd 1952 sizeof(*entry), flags, pc);
02b67518 1953 if (!event)
1dbd1951 1954 goto out_drop_count;
02b67518 1955 entry = ring_buffer_event_data(event);
02b67518 1956
48659d31 1957 entry->tgid = current->tgid;
02b67518
TE
1958 memset(&entry->caller, 0, sizeof(entry->caller));
1959
1960 trace.nr_entries = 0;
1961 trace.max_entries = FTRACE_STACK_ENTRIES;
1962 trace.skip = 0;
1963 trace.entries = entry->caller;
1964
1965 save_stack_trace_user(&trace);
f306cc82 1966 if (!call_filter_check_discard(call, entry, buffer, event))
7ffbd48d 1967 __buffer_unlock_commit(buffer, event);
91e86e56 1968
1dbd1951 1969 out_drop_count:
91e86e56 1970 __this_cpu_dec(user_stack_count);
91e86e56
SR
1971 out:
1972 preempt_enable();
02b67518
TE
1973}
1974
4fd27358
HE
1975#ifdef UNUSED
1976static void __trace_userstack(struct trace_array *tr, unsigned long flags)
02b67518 1977{
7be42151 1978 ftrace_trace_userstack(tr, flags, preempt_count());
02b67518 1979}
4fd27358 1980#endif /* UNUSED */
02b67518 1981
c0a0d0d3
FW
1982#endif /* CONFIG_STACKTRACE */
1983
07d777fe
SR
1984/* created for use with alloc_percpu */
1985struct trace_buffer_struct {
1986 char buffer[TRACE_BUF_SIZE];
1987};
1988
1989static struct trace_buffer_struct *trace_percpu_buffer;
1990static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1991static struct trace_buffer_struct *trace_percpu_irq_buffer;
1992static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1993
1994/*
1995 * The buffer used is dependent on the context. There is a per cpu
1996 * buffer for normal context, softirq contex, hard irq context and
1997 * for NMI context. Thise allows for lockless recording.
1998 *
1999 * Note, if the buffers failed to be allocated, then this returns NULL
2000 */
2001static char *get_trace_buf(void)
2002{
2003 struct trace_buffer_struct *percpu_buffer;
07d777fe
SR
2004
2005 /*
2006 * If we have allocated per cpu buffers, then we do not
2007 * need to do any locking.
2008 */
2009 if (in_nmi())
2010 percpu_buffer = trace_percpu_nmi_buffer;
2011 else if (in_irq())
2012 percpu_buffer = trace_percpu_irq_buffer;
2013 else if (in_softirq())
2014 percpu_buffer = trace_percpu_sirq_buffer;
2015 else
2016 percpu_buffer = trace_percpu_buffer;
2017
2018 if (!percpu_buffer)
2019 return NULL;
2020
d8a0349c 2021 return this_cpu_ptr(&percpu_buffer->buffer[0]);
07d777fe
SR
2022}
2023
2024static int alloc_percpu_trace_buffer(void)
2025{
2026 struct trace_buffer_struct *buffers;
2027 struct trace_buffer_struct *sirq_buffers;
2028 struct trace_buffer_struct *irq_buffers;
2029 struct trace_buffer_struct *nmi_buffers;
2030
2031 buffers = alloc_percpu(struct trace_buffer_struct);
2032 if (!buffers)
2033 goto err_warn;
2034
2035 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2036 if (!sirq_buffers)
2037 goto err_sirq;
2038
2039 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2040 if (!irq_buffers)
2041 goto err_irq;
2042
2043 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2044 if (!nmi_buffers)
2045 goto err_nmi;
2046
2047 trace_percpu_buffer = buffers;
2048 trace_percpu_sirq_buffer = sirq_buffers;
2049 trace_percpu_irq_buffer = irq_buffers;
2050 trace_percpu_nmi_buffer = nmi_buffers;
2051
2052 return 0;
2053
2054 err_nmi:
2055 free_percpu(irq_buffers);
2056 err_irq:
2057 free_percpu(sirq_buffers);
2058 err_sirq:
2059 free_percpu(buffers);
2060 err_warn:
2061 WARN(1, "Could not allocate percpu trace_printk buffer");
2062 return -ENOMEM;
2063}
2064
81698831
SR
2065static int buffers_allocated;
2066
07d777fe
SR
2067void trace_printk_init_buffers(void)
2068{
07d777fe
SR
2069 if (buffers_allocated)
2070 return;
2071
2072 if (alloc_percpu_trace_buffer())
2073 return;
2074
2184db46
SR
2075 /* trace_printk() is for debug use only. Don't use it in production. */
2076
69a1c994
BP
2077 pr_warning("\n");
2078 pr_warning("**********************************************************\n");
2184db46
SR
2079 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2080 pr_warning("** **\n");
2081 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2082 pr_warning("** **\n");
2083 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
eff264ef 2084 pr_warning("** unsafe for production use. **\n");
2184db46
SR
2085 pr_warning("** **\n");
2086 pr_warning("** If you see this message and you are not debugging **\n");
2087 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2088 pr_warning("** **\n");
2089 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2090 pr_warning("**********************************************************\n");
07d777fe 2091
b382ede6
SR
2092 /* Expand the buffers to set size */
2093 tracing_update_buffers();
2094
07d777fe 2095 buffers_allocated = 1;
81698831
SR
2096
2097 /*
2098 * trace_printk_init_buffers() can be called by modules.
2099 * If that happens, then we need to start cmdline recording
2100 * directly here. If the global_trace.buffer is already
2101 * allocated here, then this was called by module code.
2102 */
12883efb 2103 if (global_trace.trace_buffer.buffer)
81698831
SR
2104 tracing_start_cmdline_record();
2105}
2106
2107void trace_printk_start_comm(void)
2108{
2109 /* Start tracing comms if trace printk is set */
2110 if (!buffers_allocated)
2111 return;
2112 tracing_start_cmdline_record();
2113}
2114
2115static void trace_printk_start_stop_comm(int enabled)
2116{
2117 if (!buffers_allocated)
2118 return;
2119
2120 if (enabled)
2121 tracing_start_cmdline_record();
2122 else
2123 tracing_stop_cmdline_record();
07d777fe
SR
2124}
2125
769b0441 2126/**
48ead020 2127 * trace_vbprintk - write binary msg to tracing buffer
769b0441
FW
2128 *
2129 */
40ce74f1 2130int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
769b0441 2131{
e1112b4d 2132 struct ftrace_event_call *call = &event_bprint;
769b0441 2133 struct ring_buffer_event *event;
e77405ad 2134 struct ring_buffer *buffer;
769b0441 2135 struct trace_array *tr = &global_trace;
48ead020 2136 struct bprint_entry *entry;
769b0441 2137 unsigned long flags;
07d777fe
SR
2138 char *tbuffer;
2139 int len = 0, size, pc;
769b0441
FW
2140
2141 if (unlikely(tracing_selftest_running || tracing_disabled))
2142 return 0;
2143
2144 /* Don't pollute graph traces with trace_vprintk internals */
2145 pause_graph_tracing();
2146
2147 pc = preempt_count();
5168ae50 2148 preempt_disable_notrace();
769b0441 2149
07d777fe
SR
2150 tbuffer = get_trace_buf();
2151 if (!tbuffer) {
2152 len = 0;
769b0441 2153 goto out;
07d777fe 2154 }
769b0441 2155
07d777fe 2156 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
769b0441 2157
07d777fe
SR
2158 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2159 goto out;
769b0441 2160
07d777fe 2161 local_save_flags(flags);
769b0441 2162 size = sizeof(*entry) + sizeof(u32) * len;
12883efb 2163 buffer = tr->trace_buffer.buffer;
e77405ad
SR
2164 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2165 flags, pc);
769b0441 2166 if (!event)
07d777fe 2167 goto out;
769b0441
FW
2168 entry = ring_buffer_event_data(event);
2169 entry->ip = ip;
769b0441
FW
2170 entry->fmt = fmt;
2171
07d777fe 2172 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
f306cc82 2173 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2174 __buffer_unlock_commit(buffer, event);
d931369b
SR
2175 ftrace_trace_stack(buffer, flags, 6, pc);
2176 }
769b0441 2177
769b0441 2178out:
5168ae50 2179 preempt_enable_notrace();
769b0441
FW
2180 unpause_graph_tracing();
2181
2182 return len;
2183}
48ead020
FW
2184EXPORT_SYMBOL_GPL(trace_vbprintk);
2185
12883efb
SRRH
2186static int
2187__trace_array_vprintk(struct ring_buffer *buffer,
2188 unsigned long ip, const char *fmt, va_list args)
48ead020 2189{
e1112b4d 2190 struct ftrace_event_call *call = &event_print;
48ead020 2191 struct ring_buffer_event *event;
07d777fe 2192 int len = 0, size, pc;
48ead020 2193 struct print_entry *entry;
07d777fe
SR
2194 unsigned long flags;
2195 char *tbuffer;
48ead020
FW
2196
2197 if (tracing_disabled || tracing_selftest_running)
2198 return 0;
2199
07d777fe
SR
2200 /* Don't pollute graph traces with trace_vprintk internals */
2201 pause_graph_tracing();
2202
48ead020
FW
2203 pc = preempt_count();
2204 preempt_disable_notrace();
48ead020 2205
07d777fe
SR
2206
2207 tbuffer = get_trace_buf();
2208 if (!tbuffer) {
2209 len = 0;
48ead020 2210 goto out;
07d777fe 2211 }
48ead020 2212
3558a5ac 2213 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
48ead020 2214
07d777fe 2215 local_save_flags(flags);
48ead020 2216 size = sizeof(*entry) + len + 1;
e77405ad 2217 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
07d777fe 2218 flags, pc);
48ead020 2219 if (!event)
07d777fe 2220 goto out;
48ead020 2221 entry = ring_buffer_event_data(event);
c13d2f7c 2222 entry->ip = ip;
48ead020 2223
3558a5ac 2224 memcpy(&entry->buf, tbuffer, len + 1);
f306cc82 2225 if (!call_filter_check_discard(call, entry, buffer, event)) {
7ffbd48d 2226 __buffer_unlock_commit(buffer, event);
07d777fe 2227 ftrace_trace_stack(buffer, flags, 6, pc);
d931369b 2228 }
48ead020
FW
2229 out:
2230 preempt_enable_notrace();
07d777fe 2231 unpause_graph_tracing();
48ead020
FW
2232
2233 return len;
2234}
659372d3 2235
12883efb
SRRH
2236int trace_array_vprintk(struct trace_array *tr,
2237 unsigned long ip, const char *fmt, va_list args)
2238{
2239 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2240}
2241
2242int trace_array_printk(struct trace_array *tr,
2243 unsigned long ip, const char *fmt, ...)
2244{
2245 int ret;
2246 va_list ap;
2247
2248 if (!(trace_flags & TRACE_ITER_PRINTK))
2249 return 0;
2250
2251 va_start(ap, fmt);
2252 ret = trace_array_vprintk(tr, ip, fmt, ap);
2253 va_end(ap);
2254 return ret;
2255}
2256
2257int trace_array_printk_buf(struct ring_buffer *buffer,
2258 unsigned long ip, const char *fmt, ...)
2259{
2260 int ret;
2261 va_list ap;
2262
2263 if (!(trace_flags & TRACE_ITER_PRINTK))
2264 return 0;
2265
2266 va_start(ap, fmt);
2267 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2268 va_end(ap);
2269 return ret;
2270}
2271
659372d3
SR
2272int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2273{
a813a159 2274 return trace_array_vprintk(&global_trace, ip, fmt, args);
659372d3 2275}
769b0441
FW
2276EXPORT_SYMBOL_GPL(trace_vprintk);
2277
e2ac8ef5 2278static void trace_iterator_increment(struct trace_iterator *iter)
5a90f577 2279{
6d158a81
SR
2280 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2281
5a90f577 2282 iter->idx++;
6d158a81
SR
2283 if (buf_iter)
2284 ring_buffer_read(buf_iter, NULL);
5a90f577
SR
2285}
2286
e309b41d 2287static struct trace_entry *
bc21b478
SR
2288peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2289 unsigned long *lost_events)
dd0e545f 2290{
3928a8a2 2291 struct ring_buffer_event *event;
6d158a81 2292 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
dd0e545f 2293
d769041f
SR
2294 if (buf_iter)
2295 event = ring_buffer_iter_peek(buf_iter, ts);
2296 else
12883efb 2297 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
bc21b478 2298 lost_events);
d769041f 2299
4a9bd3f1
SR
2300 if (event) {
2301 iter->ent_size = ring_buffer_event_length(event);
2302 return ring_buffer_event_data(event);
2303 }
2304 iter->ent_size = 0;
2305 return NULL;
dd0e545f 2306}
d769041f 2307
dd0e545f 2308static struct trace_entry *
bc21b478
SR
2309__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2310 unsigned long *missing_events, u64 *ent_ts)
bc0c38d1 2311{
12883efb 2312 struct ring_buffer *buffer = iter->trace_buffer->buffer;
bc0c38d1 2313 struct trace_entry *ent, *next = NULL;
aa27497c 2314 unsigned long lost_events = 0, next_lost = 0;
b04cc6b1 2315 int cpu_file = iter->cpu_file;
3928a8a2 2316 u64 next_ts = 0, ts;
bc0c38d1 2317 int next_cpu = -1;
12b5da34 2318 int next_size = 0;
bc0c38d1
SR
2319 int cpu;
2320
b04cc6b1
FW
2321 /*
2322 * If we are in a per_cpu trace file, don't bother by iterating over
2323 * all cpu and peek directly.
2324 */
ae3b5093 2325 if (cpu_file > RING_BUFFER_ALL_CPUS) {
b04cc6b1
FW
2326 if (ring_buffer_empty_cpu(buffer, cpu_file))
2327 return NULL;
bc21b478 2328 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
b04cc6b1
FW
2329 if (ent_cpu)
2330 *ent_cpu = cpu_file;
2331
2332 return ent;
2333 }
2334
ab46428c 2335 for_each_tracing_cpu(cpu) {
dd0e545f 2336
3928a8a2
SR
2337 if (ring_buffer_empty_cpu(buffer, cpu))
2338 continue;
dd0e545f 2339
bc21b478 2340 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
dd0e545f 2341
cdd31cd2
IM
2342 /*
2343 * Pick the entry with the smallest timestamp:
2344 */
3928a8a2 2345 if (ent && (!next || ts < next_ts)) {
bc0c38d1
SR
2346 next = ent;
2347 next_cpu = cpu;
3928a8a2 2348 next_ts = ts;
bc21b478 2349 next_lost = lost_events;
12b5da34 2350 next_size = iter->ent_size;
bc0c38d1
SR
2351 }
2352 }
2353
12b5da34
SR
2354 iter->ent_size = next_size;
2355
bc0c38d1
SR
2356 if (ent_cpu)
2357 *ent_cpu = next_cpu;
2358
3928a8a2
SR
2359 if (ent_ts)
2360 *ent_ts = next_ts;
2361
bc21b478
SR
2362 if (missing_events)
2363 *missing_events = next_lost;
2364
bc0c38d1
SR
2365 return next;
2366}
2367
dd0e545f 2368/* Find the next real entry, without updating the iterator itself */
c4a8e8be
FW
2369struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2370 int *ent_cpu, u64 *ent_ts)
bc0c38d1 2371{
bc21b478 2372 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
dd0e545f
SR
2373}
2374
2375/* Find the next real entry, and increment the iterator to the next entry */
955b61e5 2376void *trace_find_next_entry_inc(struct trace_iterator *iter)
dd0e545f 2377{
bc21b478
SR
2378 iter->ent = __find_next_entry(iter, &iter->cpu,
2379 &iter->lost_events, &iter->ts);
dd0e545f 2380
3928a8a2 2381 if (iter->ent)
e2ac8ef5 2382 trace_iterator_increment(iter);
dd0e545f 2383
3928a8a2 2384 return iter->ent ? iter : NULL;
b3806b43 2385}
bc0c38d1 2386
e309b41d 2387static void trace_consume(struct trace_iterator *iter)
b3806b43 2388{
12883efb 2389 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
bc21b478 2390 &iter->lost_events);
bc0c38d1
SR
2391}
2392
e309b41d 2393static void *s_next(struct seq_file *m, void *v, loff_t *pos)
bc0c38d1
SR
2394{
2395 struct trace_iterator *iter = m->private;
bc0c38d1 2396 int i = (int)*pos;
4e3c3333 2397 void *ent;
bc0c38d1 2398
a63ce5b3
SR
2399 WARN_ON_ONCE(iter->leftover);
2400
bc0c38d1
SR
2401 (*pos)++;
2402
2403 /* can't go backwards */
2404 if (iter->idx > i)
2405 return NULL;
2406
2407 if (iter->idx < 0)
955b61e5 2408 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2409 else
2410 ent = iter;
2411
2412 while (ent && iter->idx < i)
955b61e5 2413 ent = trace_find_next_entry_inc(iter);
bc0c38d1
SR
2414
2415 iter->pos = *pos;
2416
bc0c38d1
SR
2417 return ent;
2418}
2419
955b61e5 2420void tracing_iter_reset(struct trace_iterator *iter, int cpu)
2f26ebd5 2421{
2f26ebd5
SR
2422 struct ring_buffer_event *event;
2423 struct ring_buffer_iter *buf_iter;
2424 unsigned long entries = 0;
2425 u64 ts;
2426
12883efb 2427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
2f26ebd5 2428
6d158a81
SR
2429 buf_iter = trace_buffer_iter(iter, cpu);
2430 if (!buf_iter)
2f26ebd5
SR
2431 return;
2432
2f26ebd5
SR
2433 ring_buffer_iter_reset(buf_iter);
2434
2435 /*
2436 * We could have the case with the max latency tracers
2437 * that a reset never took place on a cpu. This is evident
2438 * by the timestamp being before the start of the buffer.
2439 */
2440 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
12883efb 2441 if (ts >= iter->trace_buffer->time_start)
2f26ebd5
SR
2442 break;
2443 entries++;
2444 ring_buffer_read(buf_iter, NULL);
2445 }
2446
12883efb 2447 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
2f26ebd5
SR
2448}
2449
d7350c3f 2450/*
d7350c3f
FW
2451 * The current tracer is copied to avoid a global locking
2452 * all around.
2453 */
bc0c38d1
SR
2454static void *s_start(struct seq_file *m, loff_t *pos)
2455{
2456 struct trace_iterator *iter = m->private;
2b6080f2 2457 struct trace_array *tr = iter->tr;
b04cc6b1 2458 int cpu_file = iter->cpu_file;
bc0c38d1
SR
2459 void *p = NULL;
2460 loff_t l = 0;
3928a8a2 2461 int cpu;
bc0c38d1 2462
2fd196ec
HT
2463 /*
2464 * copy the tracer to avoid using a global lock all around.
2465 * iter->trace is a copy of current_trace, the pointer to the
2466 * name may be used instead of a strcmp(), as iter->trace->name
2467 * will point to the same string as current_trace->name.
2468 */
bc0c38d1 2469 mutex_lock(&trace_types_lock);
2b6080f2
SR
2470 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2471 *iter->trace = *tr->current_trace;
d7350c3f 2472 mutex_unlock(&trace_types_lock);
bc0c38d1 2473
12883efb 2474#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2475 if (iter->snapshot && iter->trace->use_max_tr)
2476 return ERR_PTR(-EBUSY);
12883efb 2477#endif
debdd57f
HT
2478
2479 if (!iter->snapshot)
2480 atomic_inc(&trace_record_cmdline_disabled);
bc0c38d1 2481
bc0c38d1
SR
2482 if (*pos != iter->pos) {
2483 iter->ent = NULL;
2484 iter->cpu = 0;
2485 iter->idx = -1;
2486
ae3b5093 2487 if (cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 2488 for_each_tracing_cpu(cpu)
2f26ebd5 2489 tracing_iter_reset(iter, cpu);
b04cc6b1 2490 } else
2f26ebd5 2491 tracing_iter_reset(iter, cpu_file);
bc0c38d1 2492
ac91d854 2493 iter->leftover = 0;
bc0c38d1
SR
2494 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2495 ;
2496
2497 } else {
a63ce5b3
SR
2498 /*
2499 * If we overflowed the seq_file before, then we want
2500 * to just reuse the trace_seq buffer again.
2501 */
2502 if (iter->leftover)
2503 p = iter;
2504 else {
2505 l = *pos - 1;
2506 p = s_next(m, p, &l);
2507 }
bc0c38d1
SR
2508 }
2509
4f535968 2510 trace_event_read_lock();
7e53bd42 2511 trace_access_lock(cpu_file);
bc0c38d1
SR
2512 return p;
2513}
2514
2515static void s_stop(struct seq_file *m, void *p)
2516{
7e53bd42
LJ
2517 struct trace_iterator *iter = m->private;
2518
12883efb 2519#ifdef CONFIG_TRACER_MAX_TRACE
debdd57f
HT
2520 if (iter->snapshot && iter->trace->use_max_tr)
2521 return;
12883efb 2522#endif
debdd57f
HT
2523
2524 if (!iter->snapshot)
2525 atomic_dec(&trace_record_cmdline_disabled);
12883efb 2526
7e53bd42 2527 trace_access_unlock(iter->cpu_file);
4f535968 2528 trace_event_read_unlock();
bc0c38d1
SR
2529}
2530
39eaf7ef 2531static void
12883efb
SRRH
2532get_total_entries(struct trace_buffer *buf,
2533 unsigned long *total, unsigned long *entries)
39eaf7ef
SR
2534{
2535 unsigned long count;
2536 int cpu;
2537
2538 *total = 0;
2539 *entries = 0;
2540
2541 for_each_tracing_cpu(cpu) {
12883efb 2542 count = ring_buffer_entries_cpu(buf->buffer, cpu);
39eaf7ef
SR
2543 /*
2544 * If this buffer has skipped entries, then we hold all
2545 * entries for the trace and we need to ignore the
2546 * ones before the time stamp.
2547 */
12883efb
SRRH
2548 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2549 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
39eaf7ef
SR
2550 /* total is the same as the entries */
2551 *total += count;
2552 } else
2553 *total += count +
12883efb 2554 ring_buffer_overrun_cpu(buf->buffer, cpu);
39eaf7ef
SR
2555 *entries += count;
2556 }
2557}
2558
e309b41d 2559static void print_lat_help_header(struct seq_file *m)
bc0c38d1 2560{
d79ac28f
RV
2561 seq_puts(m, "# _------=> CPU# \n"
2562 "# / _-----=> irqs-off \n"
2563 "# | / _----=> need-resched \n"
2564 "# || / _---=> hardirq/softirq \n"
2565 "# ||| / _--=> preempt-depth \n"
2566 "# |||| / delay \n"
2567 "# cmd pid ||||| time | caller \n"
2568 "# \\ / ||||| \\ | / \n");
bc0c38d1
SR
2569}
2570
12883efb 2571static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
bc0c38d1 2572{
39eaf7ef
SR
2573 unsigned long total;
2574 unsigned long entries;
2575
12883efb 2576 get_total_entries(buf, &total, &entries);
39eaf7ef
SR
2577 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2578 entries, total, num_online_cpus());
2579 seq_puts(m, "#\n");
2580}
2581
12883efb 2582static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
39eaf7ef 2583{
12883efb 2584 print_event_info(buf, m);
d79ac28f
RV
2585 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2586 "# | | | | |\n");
bc0c38d1
SR
2587}
2588
12883efb 2589static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
77271ce4 2590{
12883efb 2591 print_event_info(buf, m);
d79ac28f
RV
2592 seq_puts(m, "# _-----=> irqs-off\n"
2593 "# / _----=> need-resched\n"
2594 "# | / _---=> hardirq/softirq\n"
2595 "# || / _--=> preempt-depth\n"
2596 "# ||| / delay\n"
2597 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2598 "# | | | |||| | |\n");
77271ce4 2599}
bc0c38d1 2600
62b915f1 2601void
bc0c38d1
SR
2602print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2603{
2604 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
12883efb
SRRH
2605 struct trace_buffer *buf = iter->trace_buffer;
2606 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
2b6080f2 2607 struct tracer *type = iter->trace;
39eaf7ef
SR
2608 unsigned long entries;
2609 unsigned long total;
bc0c38d1
SR
2610 const char *name = "preemption";
2611
d840f718 2612 name = type->name;
bc0c38d1 2613
12883efb 2614 get_total_entries(buf, &total, &entries);
bc0c38d1 2615
888b55dc 2616 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
bc0c38d1 2617 name, UTS_RELEASE);
888b55dc 2618 seq_puts(m, "# -----------------------------------"
bc0c38d1 2619 "---------------------------------\n");
888b55dc 2620 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
bc0c38d1 2621 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
57f50be1 2622 nsecs_to_usecs(data->saved_latency),
bc0c38d1 2623 entries,
4c11d7ae 2624 total,
12883efb 2625 buf->cpu,
bc0c38d1
SR
2626#if defined(CONFIG_PREEMPT_NONE)
2627 "server",
2628#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2629 "desktop",
b5c21b45 2630#elif defined(CONFIG_PREEMPT)
bc0c38d1
SR
2631 "preempt",
2632#else
2633 "unknown",
2634#endif
2635 /* These are reserved for later use */
2636 0, 0, 0, 0);
2637#ifdef CONFIG_SMP
2638 seq_printf(m, " #P:%d)\n", num_online_cpus());
2639#else
2640 seq_puts(m, ")\n");
2641#endif
888b55dc
KM
2642 seq_puts(m, "# -----------------\n");
2643 seq_printf(m, "# | task: %.16s-%d "
bc0c38d1 2644 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
d20b92ab
EB
2645 data->comm, data->pid,
2646 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
bc0c38d1 2647 data->policy, data->rt_priority);
888b55dc 2648 seq_puts(m, "# -----------------\n");
bc0c38d1
SR
2649
2650 if (data->critical_start) {
888b55dc 2651 seq_puts(m, "# => started at: ");
214023c3
SR
2652 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2653 trace_print_seq(m, &iter->seq);
888b55dc 2654 seq_puts(m, "\n# => ended at: ");
214023c3
SR
2655 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2656 trace_print_seq(m, &iter->seq);
8248ac05 2657 seq_puts(m, "\n#\n");
bc0c38d1
SR
2658 }
2659
888b55dc 2660 seq_puts(m, "#\n");
bc0c38d1
SR
2661}
2662
a309720c
SR
2663static void test_cpu_buff_start(struct trace_iterator *iter)
2664{
2665 struct trace_seq *s = &iter->seq;
2666
12ef7d44
SR
2667 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2668 return;
2669
2670 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2671 return;
2672
4462344e 2673 if (cpumask_test_cpu(iter->cpu, iter->started))
a309720c
SR
2674 return;
2675
12883efb 2676 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
2f26ebd5
SR
2677 return;
2678
4462344e 2679 cpumask_set_cpu(iter->cpu, iter->started);
b0dfa978
FW
2680
2681 /* Don't print started cpu buffer for the first entry of the trace */
2682 if (iter->idx > 1)
2683 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2684 iter->cpu);
a309720c
SR
2685}
2686
2c4f035f 2687static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
bc0c38d1 2688{
214023c3 2689 struct trace_seq *s = &iter->seq;
bc0c38d1 2690 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
4e3c3333 2691 struct trace_entry *entry;
f633cef0 2692 struct trace_event *event;
bc0c38d1 2693
4e3c3333 2694 entry = iter->ent;
dd0e545f 2695
a309720c
SR
2696 test_cpu_buff_start(iter);
2697
c4a8e8be 2698 event = ftrace_find_event(entry->type);
bc0c38d1 2699
c4a8e8be 2700 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2701 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2702 trace_print_lat_context(iter);
2703 else
2704 trace_print_context(iter);
c4a8e8be 2705 }
bc0c38d1 2706
19a7fe20
SRRH
2707 if (trace_seq_has_overflowed(s))
2708 return TRACE_TYPE_PARTIAL_LINE;
2709
268ccda0 2710 if (event)
a9a57763 2711 return event->funcs->trace(iter, sym_flags, event);
d9793bd8 2712
19a7fe20 2713 trace_seq_printf(s, "Unknown type %d\n", entry->type);
02b67518 2714
19a7fe20 2715 return trace_handle_return(s);
bc0c38d1
SR
2716}
2717
2c4f035f 2718static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
f9896bf3
IM
2719{
2720 struct trace_seq *s = &iter->seq;
2721 struct trace_entry *entry;
f633cef0 2722 struct trace_event *event;
f9896bf3
IM
2723
2724 entry = iter->ent;
dd0e545f 2725
19a7fe20
SRRH
2726 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2727 trace_seq_printf(s, "%d %d %llu ",
2728 entry->pid, iter->cpu, iter->ts);
2729
2730 if (trace_seq_has_overflowed(s))
2731 return TRACE_TYPE_PARTIAL_LINE;
f9896bf3 2732
f633cef0 2733 event = ftrace_find_event(entry->type);
268ccda0 2734 if (event)
a9a57763 2735 return event->funcs->raw(iter, 0, event);
d9793bd8 2736
19a7fe20 2737 trace_seq_printf(s, "%d ?\n", entry->type);
777e208d 2738
19a7fe20 2739 return trace_handle_return(s);
f9896bf3
IM
2740}
2741
2c4f035f 2742static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
5e3ca0ec
IM
2743{
2744 struct trace_seq *s = &iter->seq;
2745 unsigned char newline = '\n';
2746 struct trace_entry *entry;
f633cef0 2747 struct trace_event *event;
5e3ca0ec
IM
2748
2749 entry = iter->ent;
dd0e545f 2750
c4a8e8be 2751 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2752 SEQ_PUT_HEX_FIELD(s, entry->pid);
2753 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2754 SEQ_PUT_HEX_FIELD(s, iter->ts);
2755 if (trace_seq_has_overflowed(s))
2756 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2757 }
5e3ca0ec 2758
f633cef0 2759 event = ftrace_find_event(entry->type);
268ccda0 2760 if (event) {
a9a57763 2761 enum print_line_t ret = event->funcs->hex(iter, 0, event);
d9793bd8
ACM
2762 if (ret != TRACE_TYPE_HANDLED)
2763 return ret;
2764 }
7104f300 2765
19a7fe20 2766 SEQ_PUT_FIELD(s, newline);
5e3ca0ec 2767
19a7fe20 2768 return trace_handle_return(s);
5e3ca0ec
IM
2769}
2770
2c4f035f 2771static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
cb0f12aa
IM
2772{
2773 struct trace_seq *s = &iter->seq;
2774 struct trace_entry *entry;
f633cef0 2775 struct trace_event *event;
cb0f12aa
IM
2776
2777 entry = iter->ent;
dd0e545f 2778
c4a8e8be 2779 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
19a7fe20
SRRH
2780 SEQ_PUT_FIELD(s, entry->pid);
2781 SEQ_PUT_FIELD(s, iter->cpu);
2782 SEQ_PUT_FIELD(s, iter->ts);
2783 if (trace_seq_has_overflowed(s))
2784 return TRACE_TYPE_PARTIAL_LINE;
c4a8e8be 2785 }
cb0f12aa 2786
f633cef0 2787 event = ftrace_find_event(entry->type);
a9a57763
SR
2788 return event ? event->funcs->binary(iter, 0, event) :
2789 TRACE_TYPE_HANDLED;
cb0f12aa
IM
2790}
2791
62b915f1 2792int trace_empty(struct trace_iterator *iter)
bc0c38d1 2793{
6d158a81 2794 struct ring_buffer_iter *buf_iter;
bc0c38d1
SR
2795 int cpu;
2796
9aba60fe 2797 /* If we are looking at one CPU buffer, only check that one */
ae3b5093 2798 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
9aba60fe 2799 cpu = iter->cpu_file;
6d158a81
SR
2800 buf_iter = trace_buffer_iter(iter, cpu);
2801 if (buf_iter) {
2802 if (!ring_buffer_iter_empty(buf_iter))
9aba60fe
SR
2803 return 0;
2804 } else {
12883efb 2805 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
9aba60fe
SR
2806 return 0;
2807 }
2808 return 1;
2809 }
2810
ab46428c 2811 for_each_tracing_cpu(cpu) {
6d158a81
SR
2812 buf_iter = trace_buffer_iter(iter, cpu);
2813 if (buf_iter) {
2814 if (!ring_buffer_iter_empty(buf_iter))
d769041f
SR
2815 return 0;
2816 } else {
12883efb 2817 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
d769041f
SR
2818 return 0;
2819 }
bc0c38d1 2820 }
d769041f 2821
797d3712 2822 return 1;
bc0c38d1
SR
2823}
2824
4f535968 2825/* Called with trace_event_read_lock() held. */
955b61e5 2826enum print_line_t print_trace_line(struct trace_iterator *iter)
f9896bf3 2827{
2c4f035f
FW
2828 enum print_line_t ret;
2829
19a7fe20
SRRH
2830 if (iter->lost_events) {
2831 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2832 iter->cpu, iter->lost_events);
2833 if (trace_seq_has_overflowed(&iter->seq))
2834 return TRACE_TYPE_PARTIAL_LINE;
2835 }
bc21b478 2836
2c4f035f
FW
2837 if (iter->trace && iter->trace->print_line) {
2838 ret = iter->trace->print_line(iter);
2839 if (ret != TRACE_TYPE_UNHANDLED)
2840 return ret;
2841 }
72829bc3 2842
09ae7234
SRRH
2843 if (iter->ent->type == TRACE_BPUTS &&
2844 trace_flags & TRACE_ITER_PRINTK &&
2845 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2846 return trace_print_bputs_msg_only(iter);
2847
48ead020
FW
2848 if (iter->ent->type == TRACE_BPRINT &&
2849 trace_flags & TRACE_ITER_PRINTK &&
2850 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2851 return trace_print_bprintk_msg_only(iter);
48ead020 2852
66896a85
FW
2853 if (iter->ent->type == TRACE_PRINT &&
2854 trace_flags & TRACE_ITER_PRINTK &&
2855 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
5ef841f6 2856 return trace_print_printk_msg_only(iter);
66896a85 2857
cb0f12aa
IM
2858 if (trace_flags & TRACE_ITER_BIN)
2859 return print_bin_fmt(iter);
2860
5e3ca0ec
IM
2861 if (trace_flags & TRACE_ITER_HEX)
2862 return print_hex_fmt(iter);
2863
f9896bf3
IM
2864 if (trace_flags & TRACE_ITER_RAW)
2865 return print_raw_fmt(iter);
2866
f9896bf3
IM
2867 return print_trace_fmt(iter);
2868}
2869
7e9a49ef
JO
2870void trace_latency_header(struct seq_file *m)
2871{
2872 struct trace_iterator *iter = m->private;
2873
2874 /* print nothing if the buffers are empty */
2875 if (trace_empty(iter))
2876 return;
2877
2878 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2879 print_trace_header(m, iter);
2880
2881 if (!(trace_flags & TRACE_ITER_VERBOSE))
2882 print_lat_help_header(m);
2883}
2884
62b915f1
JO
2885void trace_default_header(struct seq_file *m)
2886{
2887 struct trace_iterator *iter = m->private;
2888
f56e7f8e
JO
2889 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2890 return;
2891
62b915f1
JO
2892 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2893 /* print nothing if the buffers are empty */
2894 if (trace_empty(iter))
2895 return;
2896 print_trace_header(m, iter);
2897 if (!(trace_flags & TRACE_ITER_VERBOSE))
2898 print_lat_help_header(m);
2899 } else {
77271ce4
SR
2900 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2901 if (trace_flags & TRACE_ITER_IRQ_INFO)
12883efb 2902 print_func_help_header_irq(iter->trace_buffer, m);
77271ce4 2903 else
12883efb 2904 print_func_help_header(iter->trace_buffer, m);
77271ce4 2905 }
62b915f1
JO
2906 }
2907}
2908
e0a413f6
SR
2909static void test_ftrace_alive(struct seq_file *m)
2910{
2911 if (!ftrace_is_dead())
2912 return;
d79ac28f
RV
2913 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2914 "# MAY BE MISSING FUNCTION EVENTS\n");
e0a413f6
SR
2915}
2916
d8741e2e 2917#ifdef CONFIG_TRACER_MAX_TRACE
f1affcaa 2918static void show_snapshot_main_help(struct seq_file *m)
d8741e2e 2919{
d79ac28f
RV
2920 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2921 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2922 "# Takes a snapshot of the main buffer.\n"
2923 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2924 "# (Doesn't have to be '2' works with any number that\n"
2925 "# is not a '0' or '1')\n");
d8741e2e 2926}
f1affcaa
SRRH
2927
2928static void show_snapshot_percpu_help(struct seq_file *m)
2929{
fa6f0cc7 2930 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
f1affcaa 2931#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
d79ac28f
RV
2932 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2933 "# Takes a snapshot of the main buffer for this cpu.\n");
f1affcaa 2934#else
d79ac28f
RV
2935 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2936 "# Must use main snapshot file to allocate.\n");
f1affcaa 2937#endif
d79ac28f
RV
2938 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2939 "# (Doesn't have to be '2' works with any number that\n"
2940 "# is not a '0' or '1')\n");
f1affcaa
SRRH
2941}
2942
d8741e2e
SRRH
2943static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2944{
45ad21ca 2945 if (iter->tr->allocated_snapshot)
fa6f0cc7 2946 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
d8741e2e 2947 else
fa6f0cc7 2948 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
d8741e2e 2949
fa6f0cc7 2950 seq_puts(m, "# Snapshot commands:\n");
f1affcaa
SRRH
2951 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2952 show_snapshot_main_help(m);
2953 else
2954 show_snapshot_percpu_help(m);
d8741e2e
SRRH
2955}
2956#else
2957/* Should never be called */
2958static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2959#endif
2960
bc0c38d1
SR
2961static int s_show(struct seq_file *m, void *v)
2962{
2963 struct trace_iterator *iter = v;
a63ce5b3 2964 int ret;
bc0c38d1
SR
2965
2966 if (iter->ent == NULL) {
2967 if (iter->tr) {
2968 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2969 seq_puts(m, "#\n");
e0a413f6 2970 test_ftrace_alive(m);
bc0c38d1 2971 }
d8741e2e
SRRH
2972 if (iter->snapshot && trace_empty(iter))
2973 print_snapshot_help(m, iter);
2974 else if (iter->trace && iter->trace->print_header)
8bba1bf5 2975 iter->trace->print_header(m);
62b915f1
JO
2976 else
2977 trace_default_header(m);
2978
a63ce5b3
SR
2979 } else if (iter->leftover) {
2980 /*
2981 * If we filled the seq_file buffer earlier, we
2982 * want to just show it now.
2983 */
2984 ret = trace_print_seq(m, &iter->seq);
2985
2986 /* ret should this time be zero, but you never know */
2987 iter->leftover = ret;
2988
bc0c38d1 2989 } else {
f9896bf3 2990 print_trace_line(iter);
a63ce5b3
SR
2991 ret = trace_print_seq(m, &iter->seq);
2992 /*
2993 * If we overflow the seq_file buffer, then it will
2994 * ask us for this data again at start up.
2995 * Use that instead.
2996 * ret is 0 if seq_file write succeeded.
2997 * -1 otherwise.
2998 */
2999 iter->leftover = ret;
bc0c38d1
SR
3000 }
3001
3002 return 0;
3003}
3004
649e9c70
ON
3005/*
3006 * Should be used after trace_array_get(), trace_types_lock
3007 * ensures that i_cdev was already initialized.
3008 */
3009static inline int tracing_get_cpu(struct inode *inode)
3010{
3011 if (inode->i_cdev) /* See trace_create_cpu_file() */
3012 return (long)inode->i_cdev - 1;
3013 return RING_BUFFER_ALL_CPUS;
3014}
3015
88e9d34c 3016static const struct seq_operations tracer_seq_ops = {
4bf39a94
IM
3017 .start = s_start,
3018 .next = s_next,
3019 .stop = s_stop,
3020 .show = s_show,
bc0c38d1
SR
3021};
3022
e309b41d 3023static struct trace_iterator *
6484c71c 3024__tracing_open(struct inode *inode, struct file *file, bool snapshot)
bc0c38d1 3025{
6484c71c 3026 struct trace_array *tr = inode->i_private;
bc0c38d1 3027 struct trace_iterator *iter;
50e18b94 3028 int cpu;
bc0c38d1 3029
85a2f9b4
SR
3030 if (tracing_disabled)
3031 return ERR_PTR(-ENODEV);
60a11774 3032
50e18b94 3033 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
85a2f9b4
SR
3034 if (!iter)
3035 return ERR_PTR(-ENOMEM);
bc0c38d1 3036
6d158a81
SR
3037 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3038 GFP_KERNEL);
93574fcc
DC
3039 if (!iter->buffer_iter)
3040 goto release;
3041
d7350c3f
FW
3042 /*
3043 * We make a copy of the current tracer to avoid concurrent
3044 * changes on it while we are reading.
3045 */
bc0c38d1 3046 mutex_lock(&trace_types_lock);
d7350c3f 3047 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
85a2f9b4 3048 if (!iter->trace)
d7350c3f 3049 goto fail;
85a2f9b4 3050
2b6080f2 3051 *iter->trace = *tr->current_trace;
d7350c3f 3052
79f55997 3053 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
b0dfa978
FW
3054 goto fail;
3055
12883efb
SRRH
3056 iter->tr = tr;
3057
3058#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
3059 /* Currently only the top directory has a snapshot */
3060 if (tr->current_trace->print_max || snapshot)
12883efb 3061 iter->trace_buffer = &tr->max_buffer;
bc0c38d1 3062 else
12883efb
SRRH
3063#endif
3064 iter->trace_buffer = &tr->trace_buffer;
debdd57f 3065 iter->snapshot = snapshot;
bc0c38d1 3066 iter->pos = -1;
6484c71c 3067 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 3068 mutex_init(&iter->mutex);
bc0c38d1 3069
8bba1bf5
MM
3070 /* Notify the tracer early; before we stop tracing. */
3071 if (iter->trace && iter->trace->open)
a93751ca 3072 iter->trace->open(iter);
8bba1bf5 3073
12ef7d44 3074 /* Annotate start of buffers if we had overruns */
12883efb 3075 if (ring_buffer_overruns(iter->trace_buffer->buffer))
12ef7d44
SR
3076 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3077
8be0709f 3078 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 3079 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
3080 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3081
debdd57f
HT
3082 /* stop the trace while dumping if we are not opening "snapshot" */
3083 if (!iter->snapshot)
2b6080f2 3084 tracing_stop_tr(tr);
2f26ebd5 3085
ae3b5093 3086 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
b04cc6b1 3087 for_each_tracing_cpu(cpu) {
b04cc6b1 3088 iter->buffer_iter[cpu] =
12883efb 3089 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3090 }
3091 ring_buffer_read_prepare_sync();
3092 for_each_tracing_cpu(cpu) {
3093 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3094 tracing_iter_reset(iter, cpu);
b04cc6b1
FW
3095 }
3096 } else {
3097 cpu = iter->cpu_file;
3928a8a2 3098 iter->buffer_iter[cpu] =
12883efb 3099 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
72c9ddfd
DM
3100 ring_buffer_read_prepare_sync();
3101 ring_buffer_read_start(iter->buffer_iter[cpu]);
2f26ebd5 3102 tracing_iter_reset(iter, cpu);
3928a8a2
SR
3103 }
3104
bc0c38d1
SR
3105 mutex_unlock(&trace_types_lock);
3106
bc0c38d1 3107 return iter;
3928a8a2 3108
d7350c3f 3109 fail:
3928a8a2 3110 mutex_unlock(&trace_types_lock);
d7350c3f 3111 kfree(iter->trace);
6d158a81 3112 kfree(iter->buffer_iter);
93574fcc 3113release:
50e18b94
JO
3114 seq_release_private(inode, file);
3115 return ERR_PTR(-ENOMEM);
bc0c38d1
SR
3116}
3117
3118int tracing_open_generic(struct inode *inode, struct file *filp)
3119{
60a11774
SR
3120 if (tracing_disabled)
3121 return -ENODEV;
3122
bc0c38d1
SR
3123 filp->private_data = inode->i_private;
3124 return 0;
3125}
3126
2e86421d
GB
3127bool tracing_is_disabled(void)
3128{
3129 return (tracing_disabled) ? true: false;
3130}
3131
7b85af63
SRRH
3132/*
3133 * Open and update trace_array ref count.
3134 * Must have the current trace_array passed to it.
3135 */
dcc30223 3136static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
7b85af63
SRRH
3137{
3138 struct trace_array *tr = inode->i_private;
3139
3140 if (tracing_disabled)
3141 return -ENODEV;
3142
3143 if (trace_array_get(tr) < 0)
3144 return -ENODEV;
3145
3146 filp->private_data = inode->i_private;
3147
3148 return 0;
7b85af63
SRRH
3149}
3150
4fd27358 3151static int tracing_release(struct inode *inode, struct file *file)
bc0c38d1 3152{
6484c71c 3153 struct trace_array *tr = inode->i_private;
907f2784 3154 struct seq_file *m = file->private_data;
4acd4d00 3155 struct trace_iterator *iter;
3928a8a2 3156 int cpu;
bc0c38d1 3157
ff451961 3158 if (!(file->f_mode & FMODE_READ)) {
6484c71c 3159 trace_array_put(tr);
4acd4d00 3160 return 0;
ff451961 3161 }
4acd4d00 3162
6484c71c 3163 /* Writes do not use seq_file */
4acd4d00 3164 iter = m->private;
bc0c38d1 3165 mutex_lock(&trace_types_lock);
a695cb58 3166
3928a8a2
SR
3167 for_each_tracing_cpu(cpu) {
3168 if (iter->buffer_iter[cpu])
3169 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3170 }
3171
bc0c38d1
SR
3172 if (iter->trace && iter->trace->close)
3173 iter->trace->close(iter);
3174
debdd57f
HT
3175 if (!iter->snapshot)
3176 /* reenable tracing if it was previously enabled */
2b6080f2 3177 tracing_start_tr(tr);
f77d09a3
AL
3178
3179 __trace_array_put(tr);
3180
bc0c38d1
SR
3181 mutex_unlock(&trace_types_lock);
3182
d7350c3f 3183 mutex_destroy(&iter->mutex);
b0dfa978 3184 free_cpumask_var(iter->started);
d7350c3f 3185 kfree(iter->trace);
6d158a81 3186 kfree(iter->buffer_iter);
50e18b94 3187 seq_release_private(inode, file);
ff451961 3188
bc0c38d1
SR
3189 return 0;
3190}
3191
7b85af63
SRRH
3192static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3193{
3194 struct trace_array *tr = inode->i_private;
3195
3196 trace_array_put(tr);
bc0c38d1
SR
3197 return 0;
3198}
3199
7b85af63
SRRH
3200static int tracing_single_release_tr(struct inode *inode, struct file *file)
3201{
3202 struct trace_array *tr = inode->i_private;
3203
3204 trace_array_put(tr);
3205
3206 return single_release(inode, file);
3207}
3208
bc0c38d1
SR
3209static int tracing_open(struct inode *inode, struct file *file)
3210{
6484c71c 3211 struct trace_array *tr = inode->i_private;
85a2f9b4
SR
3212 struct trace_iterator *iter;
3213 int ret = 0;
bc0c38d1 3214
ff451961
SRRH
3215 if (trace_array_get(tr) < 0)
3216 return -ENODEV;
3217
4acd4d00 3218 /* If this file was open for write, then erase contents */
6484c71c
ON
3219 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3220 int cpu = tracing_get_cpu(inode);
3221
3222 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 3223 tracing_reset_online_cpus(&tr->trace_buffer);
4acd4d00 3224 else
6484c71c 3225 tracing_reset(&tr->trace_buffer, cpu);
4acd4d00 3226 }
bc0c38d1 3227
4acd4d00 3228 if (file->f_mode & FMODE_READ) {
6484c71c 3229 iter = __tracing_open(inode, file, false);
4acd4d00
SR
3230 if (IS_ERR(iter))
3231 ret = PTR_ERR(iter);
3232 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3233 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3234 }
ff451961
SRRH
3235
3236 if (ret < 0)
3237 trace_array_put(tr);
3238
bc0c38d1
SR
3239 return ret;
3240}
3241
607e2ea1
SRRH
3242/*
3243 * Some tracers are not suitable for instance buffers.
3244 * A tracer is always available for the global array (toplevel)
3245 * or if it explicitly states that it is.
3246 */
3247static bool
3248trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3249{
3250 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3251}
3252
3253/* Find the next tracer that this trace array may use */
3254static struct tracer *
3255get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3256{
3257 while (t && !trace_ok_for_array(t, tr))
3258 t = t->next;
3259
3260 return t;
3261}
3262
e309b41d 3263static void *
bc0c38d1
SR
3264t_next(struct seq_file *m, void *v, loff_t *pos)
3265{
607e2ea1 3266 struct trace_array *tr = m->private;
f129e965 3267 struct tracer *t = v;
bc0c38d1
SR
3268
3269 (*pos)++;
3270
3271 if (t)
607e2ea1 3272 t = get_tracer_for_array(tr, t->next);
bc0c38d1 3273
bc0c38d1
SR
3274 return t;
3275}
3276
3277static void *t_start(struct seq_file *m, loff_t *pos)
3278{
607e2ea1 3279 struct trace_array *tr = m->private;
f129e965 3280 struct tracer *t;
bc0c38d1
SR
3281 loff_t l = 0;
3282
3283 mutex_lock(&trace_types_lock);
607e2ea1
SRRH
3284
3285 t = get_tracer_for_array(tr, trace_types);
3286 for (; t && l < *pos; t = t_next(m, t, &l))
3287 ;
bc0c38d1
SR
3288
3289 return t;
3290}
3291
3292static void t_stop(struct seq_file *m, void *p)
3293{
3294 mutex_unlock(&trace_types_lock);
3295}
3296
3297static int t_show(struct seq_file *m, void *v)
3298{
3299 struct tracer *t = v;
3300
3301 if (!t)
3302 return 0;
3303
fa6f0cc7 3304 seq_puts(m, t->name);
bc0c38d1
SR
3305 if (t->next)
3306 seq_putc(m, ' ');
3307 else
3308 seq_putc(m, '\n');
3309
3310 return 0;
3311}
3312
88e9d34c 3313static const struct seq_operations show_traces_seq_ops = {
4bf39a94
IM
3314 .start = t_start,
3315 .next = t_next,
3316 .stop = t_stop,
3317 .show = t_show,
bc0c38d1
SR
3318};
3319
3320static int show_traces_open(struct inode *inode, struct file *file)
3321{
607e2ea1
SRRH
3322 struct trace_array *tr = inode->i_private;
3323 struct seq_file *m;
3324 int ret;
3325
60a11774
SR
3326 if (tracing_disabled)
3327 return -ENODEV;
3328
607e2ea1
SRRH
3329 ret = seq_open(file, &show_traces_seq_ops);
3330 if (ret)
3331 return ret;
3332
3333 m = file->private_data;
3334 m->private = tr;
3335
3336 return 0;
bc0c38d1
SR
3337}
3338
4acd4d00
SR
3339static ssize_t
3340tracing_write_stub(struct file *filp, const char __user *ubuf,
3341 size_t count, loff_t *ppos)
3342{
3343 return count;
3344}
3345
098c879e 3346loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
364829b1 3347{
098c879e
SRRH
3348 int ret;
3349
364829b1 3350 if (file->f_mode & FMODE_READ)
098c879e 3351 ret = seq_lseek(file, offset, whence);
364829b1 3352 else
098c879e
SRRH
3353 file->f_pos = ret = 0;
3354
3355 return ret;
364829b1
SP
3356}
3357
5e2336a0 3358static const struct file_operations tracing_fops = {
4bf39a94
IM
3359 .open = tracing_open,
3360 .read = seq_read,
4acd4d00 3361 .write = tracing_write_stub,
098c879e 3362 .llseek = tracing_lseek,
4bf39a94 3363 .release = tracing_release,
bc0c38d1
SR
3364};
3365
5e2336a0 3366static const struct file_operations show_traces_fops = {
c7078de1
IM
3367 .open = show_traces_open,
3368 .read = seq_read,
3369 .release = seq_release,
b444786f 3370 .llseek = seq_lseek,
c7078de1
IM
3371};
3372
36dfe925
IM
3373/*
3374 * The tracer itself will not take this lock, but still we want
3375 * to provide a consistent cpumask to user-space:
3376 */
3377static DEFINE_MUTEX(tracing_cpumask_update_lock);
3378
3379/*
3380 * Temporary storage for the character representation of the
3381 * CPU bitmask (and one more byte for the newline):
3382 */
3383static char mask_str[NR_CPUS + 1];
3384
c7078de1
IM
3385static ssize_t
3386tracing_cpumask_read(struct file *filp, char __user *ubuf,
3387 size_t count, loff_t *ppos)
3388{
ccfe9e42 3389 struct trace_array *tr = file_inode(filp)->i_private;
36dfe925 3390 int len;
c7078de1
IM
3391
3392 mutex_lock(&tracing_cpumask_update_lock);
36dfe925 3393
1a40243b
TH
3394 len = snprintf(mask_str, count, "%*pb\n",
3395 cpumask_pr_args(tr->tracing_cpumask));
3396 if (len >= count) {
36dfe925
IM
3397 count = -EINVAL;
3398 goto out_err;
3399 }
36dfe925
IM
3400 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3401
3402out_err:
c7078de1
IM
3403 mutex_unlock(&tracing_cpumask_update_lock);
3404
3405 return count;
3406}
3407
3408static ssize_t
3409tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3410 size_t count, loff_t *ppos)
3411{
ccfe9e42 3412 struct trace_array *tr = file_inode(filp)->i_private;
9e01c1b7 3413 cpumask_var_t tracing_cpumask_new;
2b6080f2 3414 int err, cpu;
9e01c1b7
RR
3415
3416 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3417 return -ENOMEM;
c7078de1 3418
9e01c1b7 3419 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
c7078de1 3420 if (err)
36dfe925
IM
3421 goto err_unlock;
3422
215368e8
LZ
3423 mutex_lock(&tracing_cpumask_update_lock);
3424
a5e25883 3425 local_irq_disable();
0b9b12c1 3426 arch_spin_lock(&tr->max_lock);
ab46428c 3427 for_each_tracing_cpu(cpu) {
36dfe925
IM
3428 /*
3429 * Increase/decrease the disabled counter if we are
3430 * about to flip a bit in the cpumask:
3431 */
ccfe9e42 3432 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3433 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3434 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3435 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925 3436 }
ccfe9e42 3437 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
9e01c1b7 3438 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
12883efb
SRRH
3439 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3440 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
36dfe925
IM
3441 }
3442 }
0b9b12c1 3443 arch_spin_unlock(&tr->max_lock);
a5e25883 3444 local_irq_enable();
36dfe925 3445
ccfe9e42 3446 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
36dfe925
IM
3447
3448 mutex_unlock(&tracing_cpumask_update_lock);
9e01c1b7 3449 free_cpumask_var(tracing_cpumask_new);
c7078de1
IM
3450
3451 return count;
36dfe925
IM
3452
3453err_unlock:
215368e8 3454 free_cpumask_var(tracing_cpumask_new);
36dfe925
IM
3455
3456 return err;
c7078de1
IM
3457}
3458
5e2336a0 3459static const struct file_operations tracing_cpumask_fops = {
ccfe9e42 3460 .open = tracing_open_generic_tr,
c7078de1
IM
3461 .read = tracing_cpumask_read,
3462 .write = tracing_cpumask_write,
ccfe9e42 3463 .release = tracing_release_generic_tr,
b444786f 3464 .llseek = generic_file_llseek,
bc0c38d1
SR
3465};
3466
fdb372ed 3467static int tracing_trace_options_show(struct seq_file *m, void *v)
bc0c38d1 3468{
d8e83d26 3469 struct tracer_opt *trace_opts;
2b6080f2 3470 struct trace_array *tr = m->private;
d8e83d26 3471 u32 tracer_flags;
d8e83d26 3472 int i;
adf9f195 3473
d8e83d26 3474 mutex_lock(&trace_types_lock);
2b6080f2
SR
3475 tracer_flags = tr->current_trace->flags->val;
3476 trace_opts = tr->current_trace->flags->opts;
d8e83d26 3477
bc0c38d1
SR
3478 for (i = 0; trace_options[i]; i++) {
3479 if (trace_flags & (1 << i))
fdb372ed 3480 seq_printf(m, "%s\n", trace_options[i]);
bc0c38d1 3481 else
fdb372ed 3482 seq_printf(m, "no%s\n", trace_options[i]);
bc0c38d1
SR
3483 }
3484
adf9f195
FW
3485 for (i = 0; trace_opts[i].name; i++) {
3486 if (tracer_flags & trace_opts[i].bit)
fdb372ed 3487 seq_printf(m, "%s\n", trace_opts[i].name);
adf9f195 3488 else
fdb372ed 3489 seq_printf(m, "no%s\n", trace_opts[i].name);
adf9f195 3490 }
d8e83d26 3491 mutex_unlock(&trace_types_lock);
adf9f195 3492
fdb372ed 3493 return 0;
bc0c38d1 3494}
bc0c38d1 3495
8c1a49ae 3496static int __set_tracer_option(struct trace_array *tr,
8d18eaaf
LZ
3497 struct tracer_flags *tracer_flags,
3498 struct tracer_opt *opts, int neg)
3499{
8c1a49ae 3500 struct tracer *trace = tr->current_trace;
8d18eaaf 3501 int ret;
bc0c38d1 3502
8c1a49ae 3503 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
8d18eaaf
LZ
3504 if (ret)
3505 return ret;
3506
3507 if (neg)
3508 tracer_flags->val &= ~opts->bit;
3509 else
3510 tracer_flags->val |= opts->bit;
3511 return 0;
bc0c38d1
SR
3512}
3513
adf9f195 3514/* Try to assign a tracer specific option */
8c1a49ae 3515static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
adf9f195 3516{
8c1a49ae 3517 struct tracer *trace = tr->current_trace;
7770841e 3518 struct tracer_flags *tracer_flags = trace->flags;
adf9f195 3519 struct tracer_opt *opts = NULL;
8d18eaaf 3520 int i;
adf9f195 3521
7770841e
Z
3522 for (i = 0; tracer_flags->opts[i].name; i++) {
3523 opts = &tracer_flags->opts[i];
adf9f195 3524
8d18eaaf 3525 if (strcmp(cmp, opts->name) == 0)
8c1a49ae 3526 return __set_tracer_option(tr, trace->flags, opts, neg);
adf9f195 3527 }
adf9f195 3528
8d18eaaf 3529 return -EINVAL;
adf9f195
FW
3530}
3531
613f04a0
SRRH
3532/* Some tracers require overwrite to stay enabled */
3533int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3534{
3535 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3536 return -1;
3537
3538 return 0;
3539}
3540
2b6080f2 3541int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
af4617bd
SR
3542{
3543 /* do nothing if flag is already set */
3544 if (!!(trace_flags & mask) == !!enabled)
613f04a0
SRRH
3545 return 0;
3546
3547 /* Give the tracer a chance to approve the change */
2b6080f2 3548 if (tr->current_trace->flag_changed)
bf6065b5 3549 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
613f04a0 3550 return -EINVAL;
af4617bd
SR
3551
3552 if (enabled)
3553 trace_flags |= mask;
3554 else
3555 trace_flags &= ~mask;
e870e9a1
LZ
3556
3557 if (mask == TRACE_ITER_RECORD_CMD)
3558 trace_event_enable_cmd_record(enabled);
750912fa 3559
80902822 3560 if (mask == TRACE_ITER_OVERWRITE) {
12883efb 3561 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
80902822 3562#ifdef CONFIG_TRACER_MAX_TRACE
12883efb 3563 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
80902822
SRRH
3564#endif
3565 }
81698831
SR
3566
3567 if (mask == TRACE_ITER_PRINTK)
3568 trace_printk_start_stop_comm(enabled);
613f04a0
SRRH
3569
3570 return 0;
af4617bd
SR
3571}
3572
2b6080f2 3573static int trace_set_options(struct trace_array *tr, char *option)
bc0c38d1 3574{
8d18eaaf 3575 char *cmp;
bc0c38d1 3576 int neg = 0;
613f04a0 3577 int ret = -ENODEV;
bc0c38d1
SR
3578 int i;
3579
7bcfaf54 3580 cmp = strstrip(option);
bc0c38d1 3581
8d18eaaf 3582 if (strncmp(cmp, "no", 2) == 0) {
bc0c38d1
SR
3583 neg = 1;
3584 cmp += 2;
3585 }
3586
69d34da2
SRRH
3587 mutex_lock(&trace_types_lock);
3588
bc0c38d1 3589 for (i = 0; trace_options[i]; i++) {
8d18eaaf 3590 if (strcmp(cmp, trace_options[i]) == 0) {
2b6080f2 3591 ret = set_tracer_flag(tr, 1 << i, !neg);
bc0c38d1
SR
3592 break;
3593 }
3594 }
adf9f195
FW
3595
3596 /* If no option could be set, test the specific tracer options */
69d34da2 3597 if (!trace_options[i])
8c1a49ae 3598 ret = set_tracer_option(tr, cmp, neg);
69d34da2
SRRH
3599
3600 mutex_unlock(&trace_types_lock);
bc0c38d1 3601
7bcfaf54
SR
3602 return ret;
3603}
3604
3605static ssize_t
3606tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3607 size_t cnt, loff_t *ppos)
3608{
2b6080f2
SR
3609 struct seq_file *m = filp->private_data;
3610 struct trace_array *tr = m->private;
7bcfaf54 3611 char buf[64];
613f04a0 3612 int ret;
7bcfaf54
SR
3613
3614 if (cnt >= sizeof(buf))
3615 return -EINVAL;
3616
3617 if (copy_from_user(&buf, ubuf, cnt))
3618 return -EFAULT;
3619
a8dd2176
SR
3620 buf[cnt] = 0;
3621
2b6080f2 3622 ret = trace_set_options(tr, buf);
613f04a0
SRRH
3623 if (ret < 0)
3624 return ret;
7bcfaf54 3625
cf8517cf 3626 *ppos += cnt;
bc0c38d1
SR
3627
3628 return cnt;
3629}
3630
fdb372ed
LZ
3631static int tracing_trace_options_open(struct inode *inode, struct file *file)
3632{
7b85af63 3633 struct trace_array *tr = inode->i_private;
f77d09a3 3634 int ret;
7b85af63 3635
fdb372ed
LZ
3636 if (tracing_disabled)
3637 return -ENODEV;
2b6080f2 3638
7b85af63
SRRH
3639 if (trace_array_get(tr) < 0)
3640 return -ENODEV;
3641
f77d09a3
AL
3642 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3643 if (ret < 0)
3644 trace_array_put(tr);
3645
3646 return ret;
fdb372ed
LZ
3647}
3648
5e2336a0 3649static const struct file_operations tracing_iter_fops = {
fdb372ed
LZ
3650 .open = tracing_trace_options_open,
3651 .read = seq_read,
3652 .llseek = seq_lseek,
7b85af63 3653 .release = tracing_single_release_tr,
ee6bce52 3654 .write = tracing_trace_options_write,
bc0c38d1
SR
3655};
3656
7bd2f24c
IM
3657static const char readme_msg[] =
3658 "tracing mini-HOWTO:\n\n"
22f45649
SRRH
3659 "# echo 0 > tracing_on : quick way to disable tracing\n"
3660 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3661 " Important files:\n"
3662 " trace\t\t\t- The static contents of the buffer\n"
3663 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3664 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3665 " current_tracer\t- function and latency tracers\n"
3666 " available_tracers\t- list of configured tracers for current_tracer\n"
3667 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3668 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3669 " trace_clock\t\t-change the clock used to order events\n"
3670 " local: Per cpu clock but may not be synced across CPUs\n"
3671 " global: Synced across CPUs but slows tracing down.\n"
3672 " counter: Not a clock, but just an increment\n"
3673 " uptime: Jiffy counter from time of boot\n"
3674 " perf: Same clock that perf events use\n"
3675#ifdef CONFIG_X86_64
3676 " x86-tsc: TSC cycle counter\n"
3677#endif
3678 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3679 " tracing_cpumask\t- Limit which CPUs to trace\n"
3680 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3681 "\t\t\t Remove sub-buffer with rmdir\n"
3682 " trace_options\t\t- Set format or modify how tracing happens\n"
71485c45
SRRH
3683 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3684 "\t\t\t option name\n"
939c7a4f 3685 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
22f45649
SRRH
3686#ifdef CONFIG_DYNAMIC_FTRACE
3687 "\n available_filter_functions - list of functions that can be filtered on\n"
71485c45
SRRH
3688 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3689 "\t\t\t functions\n"
3690 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3691 "\t modules: Can select a group via module\n"
3692 "\t Format: :mod:<module-name>\n"
3693 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3694 "\t triggers: a command to perform when function is hit\n"
3695 "\t Format: <function>:<trigger>[:count]\n"
3696 "\t trigger: traceon, traceoff\n"
3697 "\t\t enable_event:<system>:<event>\n"
3698 "\t\t disable_event:<system>:<event>\n"
22f45649 3699#ifdef CONFIG_STACKTRACE
71485c45 3700 "\t\t stacktrace\n"
22f45649
SRRH
3701#endif
3702#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3703 "\t\t snapshot\n"
22f45649 3704#endif
17a280ea
SRRH
3705 "\t\t dump\n"
3706 "\t\t cpudump\n"
71485c45
SRRH
3707 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3708 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3709 "\t The first one will disable tracing every time do_fault is hit\n"
3710 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3711 "\t The first time do trap is hit and it disables tracing, the\n"
3712 "\t counter will decrement to 2. If tracing is already disabled,\n"
3713 "\t the counter will not decrement. It only decrements when the\n"
3714 "\t trigger did work\n"
3715 "\t To remove trigger without count:\n"
3716 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3717 "\t To remove trigger with a count:\n"
3718 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
22f45649 3719 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
71485c45
SRRH
3720 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3721 "\t modules: Can select a group via module command :mod:\n"
3722 "\t Does not accept triggers\n"
22f45649
SRRH
3723#endif /* CONFIG_DYNAMIC_FTRACE */
3724#ifdef CONFIG_FUNCTION_TRACER
71485c45
SRRH
3725 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3726 "\t\t (function)\n"
22f45649
SRRH
3727#endif
3728#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3729 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
d048a8c7 3730 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
22f45649
SRRH
3731 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3732#endif
3733#ifdef CONFIG_TRACER_SNAPSHOT
71485c45
SRRH
3734 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3735 "\t\t\t snapshot buffer. Read the contents for more\n"
3736 "\t\t\t information\n"
22f45649 3737#endif
991821c8 3738#ifdef CONFIG_STACK_TRACER
22f45649
SRRH
3739 " stack_trace\t\t- Shows the max stack trace when active\n"
3740 " stack_max_size\t- Shows current max stack size that was traced\n"
71485c45
SRRH
3741 "\t\t\t Write into this file to reset the max size (trigger a\n"
3742 "\t\t\t new trace)\n"
22f45649 3743#ifdef CONFIG_DYNAMIC_FTRACE
71485c45
SRRH
3744 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3745 "\t\t\t traces\n"
22f45649 3746#endif
991821c8 3747#endif /* CONFIG_STACK_TRACER */
26f25564
TZ
3748 " events/\t\t- Directory containing all trace event subsystems:\n"
3749 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3750 " events/<system>/\t- Directory containing all trace events for <system>:\n"
71485c45
SRRH
3751 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3752 "\t\t\t events\n"
26f25564 3753 " filter\t\t- If set, only events passing filter are traced\n"
71485c45
SRRH
3754 " events/<system>/<event>/\t- Directory containing control files for\n"
3755 "\t\t\t <event>:\n"
26f25564
TZ
3756 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3757 " filter\t\t- If set, only events passing filter are traced\n"
3758 " trigger\t\t- If set, a command to perform when event is hit\n"
71485c45
SRRH
3759 "\t Format: <trigger>[:count][if <filter>]\n"
3760 "\t trigger: traceon, traceoff\n"
3761 "\t enable_event:<system>:<event>\n"
3762 "\t disable_event:<system>:<event>\n"
26f25564 3763#ifdef CONFIG_STACKTRACE
71485c45 3764 "\t\t stacktrace\n"
26f25564
TZ
3765#endif
3766#ifdef CONFIG_TRACER_SNAPSHOT
71485c45 3767 "\t\t snapshot\n"
26f25564 3768#endif
71485c45
SRRH
3769 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3770 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3771 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3772 "\t events/block/block_unplug/trigger\n"
3773 "\t The first disables tracing every time block_unplug is hit.\n"
3774 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3775 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3776 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3777 "\t Like function triggers, the counter is only decremented if it\n"
3778 "\t enabled or disabled tracing.\n"
3779 "\t To remove a trigger without a count:\n"
3780 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3781 "\t To remove a trigger with a count:\n"
3782 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3783 "\t Filters can be ignored when removing a trigger.\n"
7bd2f24c
IM
3784;
3785
3786static ssize_t
3787tracing_readme_read(struct file *filp, char __user *ubuf,
3788 size_t cnt, loff_t *ppos)
3789{
3790 return simple_read_from_buffer(ubuf, cnt, ppos,
3791 readme_msg, strlen(readme_msg));
3792}
3793
5e2336a0 3794static const struct file_operations tracing_readme_fops = {
c7078de1
IM
3795 .open = tracing_open_generic,
3796 .read = tracing_readme_read,
b444786f 3797 .llseek = generic_file_llseek,
7bd2f24c
IM
3798};
3799
42584c81
YY
3800static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
3801{
3802 unsigned int *ptr = v;
69abe6a5 3803
42584c81
YY
3804 if (*pos || m->count)
3805 ptr++;
69abe6a5 3806
42584c81 3807 (*pos)++;
69abe6a5 3808
939c7a4f
YY
3809 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3810 ptr++) {
42584c81
YY
3811 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
3812 continue;
69abe6a5 3813
42584c81
YY
3814 return ptr;
3815 }
69abe6a5 3816
42584c81
YY
3817 return NULL;
3818}
3819
3820static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3821{
3822 void *v;
3823 loff_t l = 0;
69abe6a5 3824
4c27e756
SRRH
3825 preempt_disable();
3826 arch_spin_lock(&trace_cmdline_lock);
3827
939c7a4f 3828 v = &savedcmd->map_cmdline_to_pid[0];
42584c81
YY
3829 while (l <= *pos) {
3830 v = saved_cmdlines_next(m, v, &l);
3831 if (!v)
3832 return NULL;
69abe6a5
AP
3833 }
3834
42584c81
YY
3835 return v;
3836}
3837
3838static void saved_cmdlines_stop(struct seq_file *m, void *v)
3839{
4c27e756
SRRH
3840 arch_spin_unlock(&trace_cmdline_lock);
3841 preempt_enable();
42584c81 3842}
69abe6a5 3843
42584c81
YY
3844static int saved_cmdlines_show(struct seq_file *m, void *v)
3845{
3846 char buf[TASK_COMM_LEN];
3847 unsigned int *pid = v;
69abe6a5 3848
4c27e756 3849 __trace_find_cmdline(*pid, buf);
42584c81
YY
3850 seq_printf(m, "%d %s\n", *pid, buf);
3851 return 0;
3852}
3853
3854static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3855 .start = saved_cmdlines_start,
3856 .next = saved_cmdlines_next,
3857 .stop = saved_cmdlines_stop,
3858 .show = saved_cmdlines_show,
3859};
3860
3861static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3862{
3863 if (tracing_disabled)
3864 return -ENODEV;
3865
3866 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
69abe6a5
AP
3867}
3868
3869static const struct file_operations tracing_saved_cmdlines_fops = {
42584c81
YY
3870 .open = tracing_saved_cmdlines_open,
3871 .read = seq_read,
3872 .llseek = seq_lseek,
3873 .release = seq_release,
69abe6a5
AP
3874};
3875
939c7a4f
YY
3876static ssize_t
3877tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3878 size_t cnt, loff_t *ppos)
3879{
3880 char buf[64];
3881 int r;
3882
3883 arch_spin_lock(&trace_cmdline_lock);
a6af8fbf 3884 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
939c7a4f
YY
3885 arch_spin_unlock(&trace_cmdline_lock);
3886
3887 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3888}
3889
3890static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3891{
3892 kfree(s->saved_cmdlines);
3893 kfree(s->map_cmdline_to_pid);
3894 kfree(s);
3895}
3896
3897static int tracing_resize_saved_cmdlines(unsigned int val)
3898{
3899 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3900
a6af8fbf 3901 s = kmalloc(sizeof(*s), GFP_KERNEL);
939c7a4f
YY
3902 if (!s)
3903 return -ENOMEM;
3904
3905 if (allocate_cmdlines_buffer(val, s) < 0) {
3906 kfree(s);
3907 return -ENOMEM;
3908 }
3909
3910 arch_spin_lock(&trace_cmdline_lock);
3911 savedcmd_temp = savedcmd;
3912 savedcmd = s;
3913 arch_spin_unlock(&trace_cmdline_lock);
3914 free_saved_cmdlines_buffer(savedcmd_temp);
3915
3916 return 0;
3917}
3918
3919static ssize_t
3920tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3921 size_t cnt, loff_t *ppos)
3922{
3923 unsigned long val;
3924 int ret;
3925
3926 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3927 if (ret)
3928 return ret;
3929
3930 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3931 if (!val || val > PID_MAX_DEFAULT)
3932 return -EINVAL;
3933
3934 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3935 if (ret < 0)
3936 return ret;
3937
3938 *ppos += cnt;
3939
3940 return cnt;
3941}
3942
3943static const struct file_operations tracing_saved_cmdlines_size_fops = {
3944 .open = tracing_open_generic,
3945 .read = tracing_saved_cmdlines_size_read,
3946 .write = tracing_saved_cmdlines_size_write,
3947};
3948
9828413d
SRRH
3949#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3950static union trace_enum_map_item *
3951update_enum_map(union trace_enum_map_item *ptr)
3952{
3953 if (!ptr->map.enum_string) {
3954 if (ptr->tail.next) {
3955 ptr = ptr->tail.next;
3956 /* Set ptr to the next real item (skip head) */
3957 ptr++;
3958 } else
3959 return NULL;
3960 }
3961 return ptr;
3962}
3963
3964static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3965{
3966 union trace_enum_map_item *ptr = v;
3967
3968 /*
3969 * Paranoid! If ptr points to end, we don't want to increment past it.
3970 * This really should never happen.
3971 */
3972 ptr = update_enum_map(ptr);
3973 if (WARN_ON_ONCE(!ptr))
3974 return NULL;
3975
3976 ptr++;
3977
3978 (*pos)++;
3979
3980 ptr = update_enum_map(ptr);
3981
3982 return ptr;
3983}
3984
3985static void *enum_map_start(struct seq_file *m, loff_t *pos)
3986{
3987 union trace_enum_map_item *v;
3988 loff_t l = 0;
3989
3990 mutex_lock(&trace_enum_mutex);
3991
3992 v = trace_enum_maps;
3993 if (v)
3994 v++;
3995
3996 while (v && l < *pos) {
3997 v = enum_map_next(m, v, &l);
3998 }
3999
4000 return v;
4001}
4002
4003static void enum_map_stop(struct seq_file *m, void *v)
4004{
4005 mutex_unlock(&trace_enum_mutex);
4006}
4007
4008static int enum_map_show(struct seq_file *m, void *v)
4009{
4010 union trace_enum_map_item *ptr = v;
4011
4012 seq_printf(m, "%s %ld (%s)\n",
4013 ptr->map.enum_string, ptr->map.enum_value,
4014 ptr->map.system);
4015
4016 return 0;
4017}
4018
4019static const struct seq_operations tracing_enum_map_seq_ops = {
4020 .start = enum_map_start,
4021 .next = enum_map_next,
4022 .stop = enum_map_stop,
4023 .show = enum_map_show,
4024};
4025
4026static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4027{
4028 if (tracing_disabled)
4029 return -ENODEV;
4030
4031 return seq_open(filp, &tracing_enum_map_seq_ops);
4032}
4033
4034static const struct file_operations tracing_enum_map_fops = {
4035 .open = tracing_enum_map_open,
4036 .read = seq_read,
4037 .llseek = seq_lseek,
4038 .release = seq_release,
4039};
4040
4041static inline union trace_enum_map_item *
4042trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4043{
4044 /* Return tail of array given the head */
4045 return ptr + ptr->head.length + 1;
4046}
4047
4048static void
4049trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4050 int len)
4051{
4052 struct trace_enum_map **stop;
4053 struct trace_enum_map **map;
4054 union trace_enum_map_item *map_array;
4055 union trace_enum_map_item *ptr;
4056
4057 stop = start + len;
4058
4059 /*
4060 * The trace_enum_maps contains the map plus a head and tail item,
4061 * where the head holds the module and length of array, and the
4062 * tail holds a pointer to the next list.
4063 */
4064 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4065 if (!map_array) {
4066 pr_warning("Unable to allocate trace enum mapping\n");
4067 return;
4068 }
4069
4070 mutex_lock(&trace_enum_mutex);
4071
4072 if (!trace_enum_maps)
4073 trace_enum_maps = map_array;
4074 else {
4075 ptr = trace_enum_maps;
4076 for (;;) {
4077 ptr = trace_enum_jmp_to_tail(ptr);
4078 if (!ptr->tail.next)
4079 break;
4080 ptr = ptr->tail.next;
4081
4082 }
4083 ptr->tail.next = map_array;
4084 }
4085 map_array->head.mod = mod;
4086 map_array->head.length = len;
4087 map_array++;
4088
4089 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4090 map_array->map = **map;
4091 map_array++;
4092 }
4093 memset(map_array, 0, sizeof(*map_array));
4094
4095 mutex_unlock(&trace_enum_mutex);
4096}
4097
4098static void trace_create_enum_file(struct dentry *d_tracer)
4099{
4100 trace_create_file("enum_map", 0444, d_tracer,
4101 NULL, &tracing_enum_map_fops);
4102}
4103
4104#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4105static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4106static inline void trace_insert_enum_map_file(struct module *mod,
4107 struct trace_enum_map **start, int len) { }
4108#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4109
4110static void trace_insert_enum_map(struct module *mod,
4111 struct trace_enum_map **start, int len)
0c564a53
SRRH
4112{
4113 struct trace_enum_map **map;
0c564a53
SRRH
4114
4115 if (len <= 0)
4116 return;
4117
4118 map = start;
4119
4120 trace_event_enum_update(map, len);
9828413d
SRRH
4121
4122 trace_insert_enum_map_file(mod, start, len);
0c564a53
SRRH
4123}
4124
bc0c38d1
SR
4125static ssize_t
4126tracing_set_trace_read(struct file *filp, char __user *ubuf,
4127 size_t cnt, loff_t *ppos)
4128{
2b6080f2 4129 struct trace_array *tr = filp->private_data;
ee6c2c1b 4130 char buf[MAX_TRACER_SIZE+2];
bc0c38d1
SR
4131 int r;
4132
4133 mutex_lock(&trace_types_lock);
2b6080f2 4134 r = sprintf(buf, "%s\n", tr->current_trace->name);
bc0c38d1
SR
4135 mutex_unlock(&trace_types_lock);
4136
4bf39a94 4137 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4138}
4139
b6f11df2
ACM
4140int tracer_init(struct tracer *t, struct trace_array *tr)
4141{
12883efb 4142 tracing_reset_online_cpus(&tr->trace_buffer);
b6f11df2
ACM
4143 return t->init(tr);
4144}
4145
12883efb 4146static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
438ced17
VN
4147{
4148 int cpu;
737223fb 4149
438ced17 4150 for_each_tracing_cpu(cpu)
12883efb 4151 per_cpu_ptr(buf->data, cpu)->entries = val;
438ced17
VN
4152}
4153
12883efb 4154#ifdef CONFIG_TRACER_MAX_TRACE
d60da506 4155/* resize @tr's buffer to the size of @size_tr's entries */
12883efb
SRRH
4156static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4157 struct trace_buffer *size_buf, int cpu_id)
d60da506
HT
4158{
4159 int cpu, ret = 0;
4160
4161 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4162 for_each_tracing_cpu(cpu) {
12883efb
SRRH
4163 ret = ring_buffer_resize(trace_buf->buffer,
4164 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
d60da506
HT
4165 if (ret < 0)
4166 break;
12883efb
SRRH
4167 per_cpu_ptr(trace_buf->data, cpu)->entries =
4168 per_cpu_ptr(size_buf->data, cpu)->entries;
d60da506
HT
4169 }
4170 } else {
12883efb
SRRH
4171 ret = ring_buffer_resize(trace_buf->buffer,
4172 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
d60da506 4173 if (ret == 0)
12883efb
SRRH
4174 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4175 per_cpu_ptr(size_buf->data, cpu_id)->entries;
d60da506
HT
4176 }
4177
4178 return ret;
4179}
12883efb 4180#endif /* CONFIG_TRACER_MAX_TRACE */
d60da506 4181
2b6080f2
SR
4182static int __tracing_resize_ring_buffer(struct trace_array *tr,
4183 unsigned long size, int cpu)
73c5162a
SR
4184{
4185 int ret;
4186
4187 /*
4188 * If kernel or user changes the size of the ring buffer
a123c52b
SR
4189 * we use the size that was given, and we can forget about
4190 * expanding it later.
73c5162a 4191 */
55034cd6 4192 ring_buffer_expanded = true;
73c5162a 4193
b382ede6 4194 /* May be called before buffers are initialized */
12883efb 4195 if (!tr->trace_buffer.buffer)
b382ede6
SR
4196 return 0;
4197
12883efb 4198 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
73c5162a
SR
4199 if (ret < 0)
4200 return ret;
4201
12883efb 4202#ifdef CONFIG_TRACER_MAX_TRACE
2b6080f2
SR
4203 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4204 !tr->current_trace->use_max_tr)
ef710e10
KM
4205 goto out;
4206
12883efb 4207 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
73c5162a 4208 if (ret < 0) {
12883efb
SRRH
4209 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4210 &tr->trace_buffer, cpu);
73c5162a 4211 if (r < 0) {
a123c52b
SR
4212 /*
4213 * AARGH! We are left with different
4214 * size max buffer!!!!
4215 * The max buffer is our "snapshot" buffer.
4216 * When a tracer needs a snapshot (one of the
4217 * latency tracers), it swaps the max buffer
4218 * with the saved snap shot. We succeeded to
4219 * update the size of the main buffer, but failed to
4220 * update the size of the max buffer. But when we tried
4221 * to reset the main buffer to the original size, we
4222 * failed there too. This is very unlikely to
4223 * happen, but if it does, warn and kill all
4224 * tracing.
4225 */
73c5162a
SR
4226 WARN_ON(1);
4227 tracing_disabled = 1;
4228 }
4229 return ret;
4230 }
4231
438ced17 4232 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4233 set_buffer_entries(&tr->max_buffer, size);
438ced17 4234 else
12883efb 4235 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
438ced17 4236
ef710e10 4237 out:
12883efb
SRRH
4238#endif /* CONFIG_TRACER_MAX_TRACE */
4239
438ced17 4240 if (cpu == RING_BUFFER_ALL_CPUS)
12883efb 4241 set_buffer_entries(&tr->trace_buffer, size);
438ced17 4242 else
12883efb 4243 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
73c5162a
SR
4244
4245 return ret;
4246}
4247
2b6080f2
SR
4248static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4249 unsigned long size, int cpu_id)
4f271a2a 4250{
83f40318 4251 int ret = size;
4f271a2a
VN
4252
4253 mutex_lock(&trace_types_lock);
4254
438ced17
VN
4255 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4256 /* make sure, this cpu is enabled in the mask */
4257 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4258 ret = -EINVAL;
4259 goto out;
4260 }
4261 }
4f271a2a 4262
2b6080f2 4263 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
4f271a2a
VN
4264 if (ret < 0)
4265 ret = -ENOMEM;
4266
438ced17 4267out:
4f271a2a
VN
4268 mutex_unlock(&trace_types_lock);
4269
4270 return ret;
4271}
4272
ef710e10 4273
1852fcce
SR
4274/**
4275 * tracing_update_buffers - used by tracing facility to expand ring buffers
4276 *
4277 * To save on memory when the tracing is never used on a system with it
4278 * configured in. The ring buffers are set to a minimum size. But once
4279 * a user starts to use the tracing facility, then they need to grow
4280 * to their default size.
4281 *
4282 * This function is to be called when a tracer is about to be used.
4283 */
4284int tracing_update_buffers(void)
4285{
4286 int ret = 0;
4287
1027fcb2 4288 mutex_lock(&trace_types_lock);
1852fcce 4289 if (!ring_buffer_expanded)
2b6080f2 4290 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
438ced17 4291 RING_BUFFER_ALL_CPUS);
1027fcb2 4292 mutex_unlock(&trace_types_lock);
1852fcce
SR
4293
4294 return ret;
4295}
4296
577b785f
SR
4297struct trace_option_dentry;
4298
4299static struct trace_option_dentry *
2b6080f2 4300create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
577b785f
SR
4301
4302static void
4303destroy_trace_option_files(struct trace_option_dentry *topts);
4304
6b450d25
SRRH
4305/*
4306 * Used to clear out the tracer before deletion of an instance.
4307 * Must have trace_types_lock held.
4308 */
4309static void tracing_set_nop(struct trace_array *tr)
4310{
4311 if (tr->current_trace == &nop_trace)
4312 return;
4313
50512ab5 4314 tr->current_trace->enabled--;
6b450d25
SRRH
4315
4316 if (tr->current_trace->reset)
4317 tr->current_trace->reset(tr);
4318
4319 tr->current_trace = &nop_trace;
4320}
4321
09d23a1d 4322static void update_tracer_options(struct trace_array *tr, struct tracer *t)
bc0c38d1 4323{
577b785f 4324 static struct trace_option_dentry *topts;
09d23a1d
SRRH
4325
4326 /* Only enable if the directory has been created already. */
4327 if (!tr->dir)
4328 return;
4329
4330 /* Currently, only the top instance has options */
4331 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4332 return;
4333
4334 destroy_trace_option_files(topts);
4335 topts = create_trace_option_files(tr, t);
4336}
4337
4338static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4339{
bc0c38d1 4340 struct tracer *t;
12883efb 4341#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4342 bool had_max_tr;
12883efb 4343#endif
d9e54076 4344 int ret = 0;
bc0c38d1 4345
1027fcb2
SR
4346 mutex_lock(&trace_types_lock);
4347
73c5162a 4348 if (!ring_buffer_expanded) {
2b6080f2 4349 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
438ced17 4350 RING_BUFFER_ALL_CPUS);
73c5162a 4351 if (ret < 0)
59f586db 4352 goto out;
73c5162a
SR
4353 ret = 0;
4354 }
4355
bc0c38d1
SR
4356 for (t = trace_types; t; t = t->next) {
4357 if (strcmp(t->name, buf) == 0)
4358 break;
4359 }
c2931e05
FW
4360 if (!t) {
4361 ret = -EINVAL;
4362 goto out;
4363 }
2b6080f2 4364 if (t == tr->current_trace)
bc0c38d1
SR
4365 goto out;
4366
607e2ea1
SRRH
4367 /* Some tracers are only allowed for the top level buffer */
4368 if (!trace_ok_for_array(t, tr)) {
4369 ret = -EINVAL;
4370 goto out;
4371 }
4372
cf6ab6d9
SRRH
4373 /* If trace pipe files are being read, we can't change the tracer */
4374 if (tr->current_trace->ref) {
4375 ret = -EBUSY;
4376 goto out;
4377 }
4378
9f029e83 4379 trace_branch_disable();
613f04a0 4380
50512ab5 4381 tr->current_trace->enabled--;
613f04a0 4382
2b6080f2
SR
4383 if (tr->current_trace->reset)
4384 tr->current_trace->reset(tr);
34600f0e 4385
12883efb 4386 /* Current trace needs to be nop_trace before synchronize_sched */
2b6080f2 4387 tr->current_trace = &nop_trace;
34600f0e 4388
45ad21ca
SRRH
4389#ifdef CONFIG_TRACER_MAX_TRACE
4390 had_max_tr = tr->allocated_snapshot;
34600f0e
SR
4391
4392 if (had_max_tr && !t->use_max_tr) {
4393 /*
4394 * We need to make sure that the update_max_tr sees that
4395 * current_trace changed to nop_trace to keep it from
4396 * swapping the buffers after we resize it.
4397 * The update_max_tr is called from interrupts disabled
4398 * so a synchronized_sched() is sufficient.
4399 */
4400 synchronize_sched();
3209cff4 4401 free_snapshot(tr);
ef710e10 4402 }
12883efb 4403#endif
09d23a1d 4404 update_tracer_options(tr, t);
12883efb
SRRH
4405
4406#ifdef CONFIG_TRACER_MAX_TRACE
34600f0e 4407 if (t->use_max_tr && !had_max_tr) {
3209cff4 4408 ret = alloc_snapshot(tr);
d60da506
HT
4409 if (ret < 0)
4410 goto out;
ef710e10 4411 }
12883efb 4412#endif
577b785f 4413
1c80025a 4414 if (t->init) {
b6f11df2 4415 ret = tracer_init(t, tr);
1c80025a
FW
4416 if (ret)
4417 goto out;
4418 }
bc0c38d1 4419
2b6080f2 4420 tr->current_trace = t;
50512ab5 4421 tr->current_trace->enabled++;
9f029e83 4422 trace_branch_enable(tr);
bc0c38d1
SR
4423 out:
4424 mutex_unlock(&trace_types_lock);
4425
d9e54076
PZ
4426 return ret;
4427}
4428
4429static ssize_t
4430tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4431 size_t cnt, loff_t *ppos)
4432{
607e2ea1 4433 struct trace_array *tr = filp->private_data;
ee6c2c1b 4434 char buf[MAX_TRACER_SIZE+1];
d9e54076
PZ
4435 int i;
4436 size_t ret;
e6e7a65a
FW
4437 int err;
4438
4439 ret = cnt;
d9e54076 4440
ee6c2c1b
LZ
4441 if (cnt > MAX_TRACER_SIZE)
4442 cnt = MAX_TRACER_SIZE;
d9e54076
PZ
4443
4444 if (copy_from_user(&buf, ubuf, cnt))
4445 return -EFAULT;
4446
4447 buf[cnt] = 0;
4448
4449 /* strip ending whitespace. */
4450 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4451 buf[i] = 0;
4452
607e2ea1 4453 err = tracing_set_tracer(tr, buf);
e6e7a65a
FW
4454 if (err)
4455 return err;
d9e54076 4456
cf8517cf 4457 *ppos += ret;
bc0c38d1 4458
c2931e05 4459 return ret;
bc0c38d1
SR
4460}
4461
4462static ssize_t
6508fa76
SF
4463tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4464 size_t cnt, loff_t *ppos)
bc0c38d1 4465{
bc0c38d1
SR
4466 char buf[64];
4467 int r;
4468
cffae437 4469 r = snprintf(buf, sizeof(buf), "%ld\n",
bc0c38d1 4470 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
cffae437
SR
4471 if (r > sizeof(buf))
4472 r = sizeof(buf);
4bf39a94 4473 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
bc0c38d1
SR
4474}
4475
4476static ssize_t
6508fa76
SF
4477tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4478 size_t cnt, loff_t *ppos)
bc0c38d1 4479{
5e39841c 4480 unsigned long val;
c6caeeb1 4481 int ret;
bc0c38d1 4482
22fe9b54
PH
4483 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4484 if (ret)
c6caeeb1 4485 return ret;
bc0c38d1
SR
4486
4487 *ptr = val * 1000;
4488
4489 return cnt;
4490}
4491
6508fa76
SF
4492static ssize_t
4493tracing_thresh_read(struct file *filp, char __user *ubuf,
4494 size_t cnt, loff_t *ppos)
4495{
4496 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4497}
4498
4499static ssize_t
4500tracing_thresh_write(struct file *filp, const char __user *ubuf,
4501 size_t cnt, loff_t *ppos)
4502{
4503 struct trace_array *tr = filp->private_data;
4504 int ret;
4505
4506 mutex_lock(&trace_types_lock);
4507 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4508 if (ret < 0)
4509 goto out;
4510
4511 if (tr->current_trace->update_thresh) {
4512 ret = tr->current_trace->update_thresh(tr);
4513 if (ret < 0)
4514 goto out;
4515 }
4516
4517 ret = cnt;
4518out:
4519 mutex_unlock(&trace_types_lock);
4520
4521 return ret;
4522}
4523
4524static ssize_t
4525tracing_max_lat_read(struct file *filp, char __user *ubuf,
4526 size_t cnt, loff_t *ppos)
4527{
4528 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4529}
4530
4531static ssize_t
4532tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4533 size_t cnt, loff_t *ppos)
4534{
4535 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4536}
4537
b3806b43
SR
4538static int tracing_open_pipe(struct inode *inode, struct file *filp)
4539{
15544209 4540 struct trace_array *tr = inode->i_private;
b3806b43 4541 struct trace_iterator *iter;
b04cc6b1 4542 int ret = 0;
b3806b43
SR
4543
4544 if (tracing_disabled)
4545 return -ENODEV;
4546
7b85af63
SRRH
4547 if (trace_array_get(tr) < 0)
4548 return -ENODEV;
4549
b04cc6b1
FW
4550 mutex_lock(&trace_types_lock);
4551
b3806b43
SR
4552 /* create a buffer to store the information to pass to userspace */
4553 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
b04cc6b1
FW
4554 if (!iter) {
4555 ret = -ENOMEM;
f77d09a3 4556 __trace_array_put(tr);
b04cc6b1
FW
4557 goto out;
4558 }
b3806b43 4559
3a161d99 4560 trace_seq_init(&iter->seq);
d716ff71 4561 iter->trace = tr->current_trace;
d7350c3f 4562
4462344e 4563 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
b04cc6b1 4564 ret = -ENOMEM;
d7350c3f 4565 goto fail;
4462344e
RR
4566 }
4567
a309720c 4568 /* trace pipe does not show start of buffer */
4462344e 4569 cpumask_setall(iter->started);
a309720c 4570
112f38a7
SR
4571 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4572 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4573
8be0709f 4574 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
58e8eedf 4575 if (trace_clocks[tr->clock_id].in_ns)
8be0709f
DS
4576 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4577
15544209
ON
4578 iter->tr = tr;
4579 iter->trace_buffer = &tr->trace_buffer;
4580 iter->cpu_file = tracing_get_cpu(inode);
d7350c3f 4581 mutex_init(&iter->mutex);
b3806b43
SR
4582 filp->private_data = iter;
4583
107bad8b
SR
4584 if (iter->trace->pipe_open)
4585 iter->trace->pipe_open(iter);
107bad8b 4586
b444786f 4587 nonseekable_open(inode, filp);
cf6ab6d9
SRRH
4588
4589 tr->current_trace->ref++;
b04cc6b1
FW
4590out:
4591 mutex_unlock(&trace_types_lock);
4592 return ret;
d7350c3f
FW
4593
4594fail:
4595 kfree(iter->trace);
4596 kfree(iter);
7b85af63 4597 __trace_array_put(tr);
d7350c3f
FW
4598 mutex_unlock(&trace_types_lock);
4599 return ret;
b3806b43
SR
4600}
4601
4602static int tracing_release_pipe(struct inode *inode, struct file *file)
4603{
4604 struct trace_iterator *iter = file->private_data;
15544209 4605 struct trace_array *tr = inode->i_private;
b3806b43 4606
b04cc6b1
FW
4607 mutex_lock(&trace_types_lock);
4608
cf6ab6d9
SRRH
4609 tr->current_trace->ref--;
4610
29bf4a5e 4611 if (iter->trace->pipe_close)
c521efd1
SR
4612 iter->trace->pipe_close(iter);
4613
b04cc6b1
FW
4614 mutex_unlock(&trace_types_lock);
4615
4462344e 4616 free_cpumask_var(iter->started);
d7350c3f 4617 mutex_destroy(&iter->mutex);
b3806b43 4618 kfree(iter);
b3806b43 4619
7b85af63
SRRH
4620 trace_array_put(tr);
4621
b3806b43
SR
4622 return 0;
4623}
4624
2a2cc8f7 4625static unsigned int
cc60cdc9 4626trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
2a2cc8f7 4627{
15693458
SRRH
4628 /* Iterators are static, they should be filled or empty */
4629 if (trace_buffer_iter(iter, iter->cpu_file))
4630 return POLLIN | POLLRDNORM;
2a2cc8f7 4631
15693458 4632 if (trace_flags & TRACE_ITER_BLOCK)
2a2cc8f7
SSP
4633 /*
4634 * Always select as readable when in blocking mode
4635 */
4636 return POLLIN | POLLRDNORM;
15693458 4637 else
12883efb 4638 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
15693458 4639 filp, poll_table);
2a2cc8f7 4640}
2a2cc8f7 4641
cc60cdc9
SR
4642static unsigned int
4643tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4644{
4645 struct trace_iterator *iter = filp->private_data;
4646
4647 return trace_poll(iter, filp, poll_table);
2a2cc8f7
SSP
4648}
4649
d716ff71 4650/* Must be called with iter->mutex held. */
ff98781b 4651static int tracing_wait_pipe(struct file *filp)
b3806b43
SR
4652{
4653 struct trace_iterator *iter = filp->private_data;
8b8b3683 4654 int ret;
b3806b43 4655
b3806b43 4656 while (trace_empty(iter)) {
2dc8f095 4657
107bad8b 4658 if ((filp->f_flags & O_NONBLOCK)) {
ff98781b 4659 return -EAGAIN;
107bad8b 4660 }
2dc8f095 4661
b3806b43 4662 /*
250bfd3d 4663 * We block until we read something and tracing is disabled.
b3806b43
SR
4664 * We still block if tracing is disabled, but we have never
4665 * read anything. This allows a user to cat this file, and
4666 * then enable tracing. But after we have read something,
4667 * we give an EOF when tracing is again disabled.
4668 *
4669 * iter->pos will be 0 if we haven't read anything.
4670 */
10246fa3 4671 if (!tracing_is_on() && iter->pos)
b3806b43 4672 break;
f4874261
SRRH
4673
4674 mutex_unlock(&iter->mutex);
4675
e30f53aa 4676 ret = wait_on_pipe(iter, false);
f4874261
SRRH
4677
4678 mutex_lock(&iter->mutex);
4679
8b8b3683
SRRH
4680 if (ret)
4681 return ret;
b3806b43
SR
4682 }
4683
ff98781b
EGM
4684 return 1;
4685}
4686
4687/*
4688 * Consumer reader.
4689 */
4690static ssize_t
4691tracing_read_pipe(struct file *filp, char __user *ubuf,
4692 size_t cnt, loff_t *ppos)
4693{
4694 struct trace_iterator *iter = filp->private_data;
4695 ssize_t sret;
4696
4697 /* return any leftover data */
4698 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4699 if (sret != -EBUSY)
4700 return sret;
4701
f9520750 4702 trace_seq_init(&iter->seq);
ff98781b 4703
d7350c3f
FW
4704 /*
4705 * Avoid more than one consumer on a single file descriptor
4706 * This is just a matter of traces coherency, the ring buffer itself
4707 * is protected.
4708 */
4709 mutex_lock(&iter->mutex);
ff98781b
EGM
4710 if (iter->trace->read) {
4711 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4712 if (sret)
4713 goto out;
4714 }
4715
4716waitagain:
4717 sret = tracing_wait_pipe(filp);
4718 if (sret <= 0)
4719 goto out;
4720
b3806b43 4721 /* stop when tracing is finished */
ff98781b
EGM
4722 if (trace_empty(iter)) {
4723 sret = 0;
107bad8b 4724 goto out;
ff98781b 4725 }
b3806b43
SR
4726
4727 if (cnt >= PAGE_SIZE)
4728 cnt = PAGE_SIZE - 1;
4729
53d0aa77 4730 /* reset all but tr, trace, and overruns */
53d0aa77
SR
4731 memset(&iter->seq, 0,
4732 sizeof(struct trace_iterator) -
4733 offsetof(struct trace_iterator, seq));
ed5467da 4734 cpumask_clear(iter->started);
4823ed7e 4735 iter->pos = -1;
b3806b43 4736
4f535968 4737 trace_event_read_lock();
7e53bd42 4738 trace_access_lock(iter->cpu_file);
955b61e5 4739 while (trace_find_next_entry_inc(iter) != NULL) {
2c4f035f 4740 enum print_line_t ret;
5ac48378 4741 int save_len = iter->seq.seq.len;
088b1e42 4742
f9896bf3 4743 ret = print_trace_line(iter);
2c4f035f 4744 if (ret == TRACE_TYPE_PARTIAL_LINE) {
088b1e42 4745 /* don't print partial lines */
5ac48378 4746 iter->seq.seq.len = save_len;
b3806b43 4747 break;
088b1e42 4748 }
b91facc3
FW
4749 if (ret != TRACE_TYPE_NO_CONSUME)
4750 trace_consume(iter);
b3806b43 4751
5ac48378 4752 if (trace_seq_used(&iter->seq) >= cnt)
b3806b43 4753 break;
ee5e51f5
JO
4754
4755 /*
4756 * Setting the full flag means we reached the trace_seq buffer
4757 * size and we should leave by partial output condition above.
4758 * One of the trace_seq_* functions is not used properly.
4759 */
4760 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4761 iter->ent->type);
b3806b43 4762 }
7e53bd42 4763 trace_access_unlock(iter->cpu_file);
4f535968 4764 trace_event_read_unlock();
b3806b43 4765
b3806b43 4766 /* Now copy what we have to the user */
6c6c2796 4767 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5ac48378 4768 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
f9520750 4769 trace_seq_init(&iter->seq);
9ff4b974
PP
4770
4771 /*
25985edc 4772 * If there was nothing to send to user, in spite of consuming trace
9ff4b974
PP
4773 * entries, go back to wait for more entries.
4774 */
6c6c2796 4775 if (sret == -EBUSY)
9ff4b974 4776 goto waitagain;
b3806b43 4777
107bad8b 4778out:
d7350c3f 4779 mutex_unlock(&iter->mutex);
107bad8b 4780
6c6c2796 4781 return sret;
b3806b43
SR
4782}
4783
3c56819b
EGM
4784static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4785 unsigned int idx)
4786{
4787 __free_page(spd->pages[idx]);
4788}
4789
28dfef8f 4790static const struct pipe_buf_operations tracing_pipe_buf_ops = {
34cd4998 4791 .can_merge = 0,
34cd4998 4792 .confirm = generic_pipe_buf_confirm,
92fdd98c 4793 .release = generic_pipe_buf_release,
34cd4998
SR
4794 .steal = generic_pipe_buf_steal,
4795 .get = generic_pipe_buf_get,
3c56819b
EGM
4796};
4797
34cd4998 4798static size_t
fa7c7f6e 4799tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
34cd4998
SR
4800{
4801 size_t count;
74f06bb7 4802 int save_len;
34cd4998
SR
4803 int ret;
4804
4805 /* Seq buffer is page-sized, exactly what we need. */
4806 for (;;) {
74f06bb7 4807 save_len = iter->seq.seq.len;
34cd4998 4808 ret = print_trace_line(iter);
74f06bb7
SRRH
4809
4810 if (trace_seq_has_overflowed(&iter->seq)) {
4811 iter->seq.seq.len = save_len;
34cd4998
SR
4812 break;
4813 }
74f06bb7
SRRH
4814
4815 /*
4816 * This should not be hit, because it should only
4817 * be set if the iter->seq overflowed. But check it
4818 * anyway to be safe.
4819 */
34cd4998 4820 if (ret == TRACE_TYPE_PARTIAL_LINE) {
74f06bb7
SRRH
4821 iter->seq.seq.len = save_len;
4822 break;
4823 }
4824
5ac48378 4825 count = trace_seq_used(&iter->seq) - save_len;
74f06bb7
SRRH
4826 if (rem < count) {
4827 rem = 0;
4828 iter->seq.seq.len = save_len;
34cd4998
SR
4829 break;
4830 }
4831
74e7ff8c
LJ
4832 if (ret != TRACE_TYPE_NO_CONSUME)
4833 trace_consume(iter);
34cd4998 4834 rem -= count;
955b61e5 4835 if (!trace_find_next_entry_inc(iter)) {
34cd4998
SR
4836 rem = 0;
4837 iter->ent = NULL;
4838 break;
4839 }
4840 }
4841
4842 return rem;
4843}
4844
3c56819b
EGM
4845static ssize_t tracing_splice_read_pipe(struct file *filp,
4846 loff_t *ppos,
4847 struct pipe_inode_info *pipe,
4848 size_t len,
4849 unsigned int flags)
4850{
35f3d14d
JA
4851 struct page *pages_def[PIPE_DEF_BUFFERS];
4852 struct partial_page partial_def[PIPE_DEF_BUFFERS];
3c56819b
EGM
4853 struct trace_iterator *iter = filp->private_data;
4854 struct splice_pipe_desc spd = {
35f3d14d
JA
4855 .pages = pages_def,
4856 .partial = partial_def,
34cd4998 4857 .nr_pages = 0, /* This gets updated below. */
047fe360 4858 .nr_pages_max = PIPE_DEF_BUFFERS,
34cd4998
SR
4859 .flags = flags,
4860 .ops = &tracing_pipe_buf_ops,
4861 .spd_release = tracing_spd_release_pipe,
3c56819b
EGM
4862 };
4863 ssize_t ret;
34cd4998 4864 size_t rem;
3c56819b
EGM
4865 unsigned int i;
4866
35f3d14d
JA
4867 if (splice_grow_spd(pipe, &spd))
4868 return -ENOMEM;
4869
d7350c3f 4870 mutex_lock(&iter->mutex);
3c56819b
EGM
4871
4872 if (iter->trace->splice_read) {
4873 ret = iter->trace->splice_read(iter, filp,
4874 ppos, pipe, len, flags);
4875 if (ret)
34cd4998 4876 goto out_err;
3c56819b
EGM
4877 }
4878
4879 ret = tracing_wait_pipe(filp);
4880 if (ret <= 0)
34cd4998 4881 goto out_err;
3c56819b 4882
955b61e5 4883 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3c56819b 4884 ret = -EFAULT;
34cd4998 4885 goto out_err;
3c56819b
EGM
4886 }
4887
4f535968 4888 trace_event_read_lock();
7e53bd42 4889 trace_access_lock(iter->cpu_file);
4f535968 4890
3c56819b 4891 /* Fill as many pages as possible. */
a786c06d 4892 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
35f3d14d
JA
4893 spd.pages[i] = alloc_page(GFP_KERNEL);
4894 if (!spd.pages[i])
34cd4998 4895 break;
3c56819b 4896
fa7c7f6e 4897 rem = tracing_fill_pipe_page(rem, iter);
3c56819b
EGM
4898
4899 /* Copy the data into the page, so we can start over. */
4900 ret = trace_seq_to_buffer(&iter->seq,
35f3d14d 4901 page_address(spd.pages[i]),
5ac48378 4902 trace_seq_used(&iter->seq));
3c56819b 4903 if (ret < 0) {
35f3d14d 4904 __free_page(spd.pages[i]);
3c56819b
EGM
4905 break;
4906 }
35f3d14d 4907 spd.partial[i].offset = 0;
5ac48378 4908 spd.partial[i].len = trace_seq_used(&iter->seq);
3c56819b 4909
f9520750 4910 trace_seq_init(&iter->seq);
3c56819b
EGM
4911 }
4912
7e53bd42 4913 trace_access_unlock(iter->cpu_file);
4f535968 4914 trace_event_read_unlock();
d7350c3f 4915 mutex_unlock(&iter->mutex);
3c56819b
EGM
4916
4917 spd.nr_pages = i;
4918
35f3d14d
JA
4919 ret = splice_to_pipe(pipe, &spd);
4920out:
047fe360 4921 splice_shrink_spd(&spd);
35f3d14d 4922 return ret;
3c56819b 4923
34cd4998 4924out_err:
d7350c3f 4925 mutex_unlock(&iter->mutex);
35f3d14d 4926 goto out;
3c56819b
EGM
4927}
4928
a98a3c3f
SR
4929static ssize_t
4930tracing_entries_read(struct file *filp, char __user *ubuf,
4931 size_t cnt, loff_t *ppos)
4932{
0bc392ee
ON
4933 struct inode *inode = file_inode(filp);
4934 struct trace_array *tr = inode->i_private;
4935 int cpu = tracing_get_cpu(inode);
438ced17
VN
4936 char buf[64];
4937 int r = 0;
4938 ssize_t ret;
a98a3c3f 4939
db526ca3 4940 mutex_lock(&trace_types_lock);
438ced17 4941
0bc392ee 4942 if (cpu == RING_BUFFER_ALL_CPUS) {
438ced17
VN
4943 int cpu, buf_size_same;
4944 unsigned long size;
4945
4946 size = 0;
4947 buf_size_same = 1;
4948 /* check if all cpu sizes are same */
4949 for_each_tracing_cpu(cpu) {
4950 /* fill in the size from first enabled cpu */
4951 if (size == 0)
12883efb
SRRH
4952 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4953 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
438ced17
VN
4954 buf_size_same = 0;
4955 break;
4956 }
4957 }
4958
4959 if (buf_size_same) {
4960 if (!ring_buffer_expanded)
4961 r = sprintf(buf, "%lu (expanded: %lu)\n",
4962 size >> 10,
4963 trace_buf_size >> 10);
4964 else
4965 r = sprintf(buf, "%lu\n", size >> 10);
4966 } else
4967 r = sprintf(buf, "X\n");
4968 } else
0bc392ee 4969 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
438ced17 4970
db526ca3
SR
4971 mutex_unlock(&trace_types_lock);
4972
438ced17
VN
4973 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4974 return ret;
a98a3c3f
SR
4975}
4976
4977static ssize_t
4978tracing_entries_write(struct file *filp, const char __user *ubuf,
4979 size_t cnt, loff_t *ppos)
4980{
0bc392ee
ON
4981 struct inode *inode = file_inode(filp);
4982 struct trace_array *tr = inode->i_private;
a98a3c3f 4983 unsigned long val;
4f271a2a 4984 int ret;
a98a3c3f 4985
22fe9b54
PH
4986 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4987 if (ret)
c6caeeb1 4988 return ret;
a98a3c3f
SR
4989
4990 /* must have at least 1 entry */
4991 if (!val)
4992 return -EINVAL;
4993
1696b2b0
SR
4994 /* value is in KB */
4995 val <<= 10;
0bc392ee 4996 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
4f271a2a
VN
4997 if (ret < 0)
4998 return ret;
a98a3c3f 4999
cf8517cf 5000 *ppos += cnt;
a98a3c3f 5001
4f271a2a
VN
5002 return cnt;
5003}
bf5e6519 5004
f81ab074
VN
5005static ssize_t
5006tracing_total_entries_read(struct file *filp, char __user *ubuf,
5007 size_t cnt, loff_t *ppos)
5008{
5009 struct trace_array *tr = filp->private_data;
5010 char buf[64];
5011 int r, cpu;
5012 unsigned long size = 0, expanded_size = 0;
5013
5014 mutex_lock(&trace_types_lock);
5015 for_each_tracing_cpu(cpu) {
12883efb 5016 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
f81ab074
VN
5017 if (!ring_buffer_expanded)
5018 expanded_size += trace_buf_size >> 10;
5019 }
5020 if (ring_buffer_expanded)
5021 r = sprintf(buf, "%lu\n", size);
5022 else
5023 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5024 mutex_unlock(&trace_types_lock);
5025
5026 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5027}
5028
4f271a2a
VN
5029static ssize_t
5030tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5031 size_t cnt, loff_t *ppos)
5032{
5033 /*
5034 * There is no need to read what the user has written, this function
5035 * is just to make sure that there is no error when "echo" is used
5036 */
5037
5038 *ppos += cnt;
a98a3c3f
SR
5039
5040 return cnt;
5041}
5042
4f271a2a
VN
5043static int
5044tracing_free_buffer_release(struct inode *inode, struct file *filp)
5045{
2b6080f2
SR
5046 struct trace_array *tr = inode->i_private;
5047
cf30cf67
SR
5048 /* disable tracing ? */
5049 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
711e1243 5050 tracer_tracing_off(tr);
4f271a2a 5051 /* resize the ring buffer to 0 */
2b6080f2 5052 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
4f271a2a 5053
7b85af63
SRRH
5054 trace_array_put(tr);
5055
4f271a2a
VN
5056 return 0;
5057}
5058
5bf9a1ee
PP
5059static ssize_t
5060tracing_mark_write(struct file *filp, const char __user *ubuf,
5061 size_t cnt, loff_t *fpos)
5062{
d696b58c 5063 unsigned long addr = (unsigned long)ubuf;
2d71619c 5064 struct trace_array *tr = filp->private_data;
d696b58c
SR
5065 struct ring_buffer_event *event;
5066 struct ring_buffer *buffer;
5067 struct print_entry *entry;
5068 unsigned long irq_flags;
5069 struct page *pages[2];
6edb2a8a 5070 void *map_page[2];
d696b58c
SR
5071 int nr_pages = 1;
5072 ssize_t written;
d696b58c
SR
5073 int offset;
5074 int size;
5075 int len;
5076 int ret;
6edb2a8a 5077 int i;
5bf9a1ee 5078
c76f0694 5079 if (tracing_disabled)
5bf9a1ee
PP
5080 return -EINVAL;
5081
5224c3a3
MSB
5082 if (!(trace_flags & TRACE_ITER_MARKERS))
5083 return -EINVAL;
5084
5bf9a1ee
PP
5085 if (cnt > TRACE_BUF_SIZE)
5086 cnt = TRACE_BUF_SIZE;
5087
d696b58c
SR
5088 /*
5089 * Userspace is injecting traces into the kernel trace buffer.
5090 * We want to be as non intrusive as possible.
5091 * To do so, we do not want to allocate any special buffers
5092 * or take any locks, but instead write the userspace data
5093 * straight into the ring buffer.
5094 *
5095 * First we need to pin the userspace buffer into memory,
5096 * which, most likely it is, because it just referenced it.
5097 * But there's no guarantee that it is. By using get_user_pages_fast()
5098 * and kmap_atomic/kunmap_atomic() we can get access to the
5099 * pages directly. We then write the data directly into the
5100 * ring buffer.
5101 */
5102 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
5bf9a1ee 5103
d696b58c
SR
5104 /* check if we cross pages */
5105 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5106 nr_pages = 2;
5107
5108 offset = addr & (PAGE_SIZE - 1);
5109 addr &= PAGE_MASK;
5110
5111 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5112 if (ret < nr_pages) {
5113 while (--ret >= 0)
5114 put_page(pages[ret]);
5115 written = -EFAULT;
5116 goto out;
5bf9a1ee 5117 }
d696b58c 5118
6edb2a8a
SR
5119 for (i = 0; i < nr_pages; i++)
5120 map_page[i] = kmap_atomic(pages[i]);
d696b58c
SR
5121
5122 local_save_flags(irq_flags);
5123 size = sizeof(*entry) + cnt + 2; /* possible \n added */
2d71619c 5124 buffer = tr->trace_buffer.buffer;
d696b58c
SR
5125 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5126 irq_flags, preempt_count());
5127 if (!event) {
5128 /* Ring buffer disabled, return as if not open for write */
5129 written = -EBADF;
5130 goto out_unlock;
5bf9a1ee 5131 }
d696b58c
SR
5132
5133 entry = ring_buffer_event_data(event);
5134 entry->ip = _THIS_IP_;
5135
5136 if (nr_pages == 2) {
5137 len = PAGE_SIZE - offset;
6edb2a8a
SR
5138 memcpy(&entry->buf, map_page[0] + offset, len);
5139 memcpy(&entry->buf[len], map_page[1], cnt - len);
c13d2f7c 5140 } else
6edb2a8a 5141 memcpy(&entry->buf, map_page[0] + offset, cnt);
5bf9a1ee 5142
d696b58c
SR
5143 if (entry->buf[cnt - 1] != '\n') {
5144 entry->buf[cnt] = '\n';
5145 entry->buf[cnt + 1] = '\0';
5146 } else
5147 entry->buf[cnt] = '\0';
5148
7ffbd48d 5149 __buffer_unlock_commit(buffer, event);
5bf9a1ee 5150
d696b58c 5151 written = cnt;
5bf9a1ee 5152
d696b58c 5153 *fpos += written;
1aa54bca 5154
d696b58c 5155 out_unlock:
7215853e 5156 for (i = nr_pages - 1; i >= 0; i--) {
6edb2a8a
SR
5157 kunmap_atomic(map_page[i]);
5158 put_page(pages[i]);
5159 }
d696b58c 5160 out:
1aa54bca 5161 return written;
5bf9a1ee
PP
5162}
5163
13f16d20 5164static int tracing_clock_show(struct seq_file *m, void *v)
5079f326 5165{
2b6080f2 5166 struct trace_array *tr = m->private;
5079f326
Z
5167 int i;
5168
5169 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
13f16d20 5170 seq_printf(m,
5079f326 5171 "%s%s%s%s", i ? " " : "",
2b6080f2
SR
5172 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5173 i == tr->clock_id ? "]" : "");
13f16d20 5174 seq_putc(m, '\n');
5079f326 5175
13f16d20 5176 return 0;
5079f326
Z
5177}
5178
e1e232ca 5179static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
5079f326 5180{
5079f326
Z
5181 int i;
5182
5079f326
Z
5183 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5184 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5185 break;
5186 }
5187 if (i == ARRAY_SIZE(trace_clocks))
5188 return -EINVAL;
5189
5079f326
Z
5190 mutex_lock(&trace_types_lock);
5191
2b6080f2
SR
5192 tr->clock_id = i;
5193
12883efb 5194 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
5079f326 5195
60303ed3
DS
5196 /*
5197 * New clock may not be consistent with the previous clock.
5198 * Reset the buffer so that it doesn't have incomparable timestamps.
5199 */
9457158b 5200 tracing_reset_online_cpus(&tr->trace_buffer);
12883efb
SRRH
5201
5202#ifdef CONFIG_TRACER_MAX_TRACE
5203 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5204 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
9457158b 5205 tracing_reset_online_cpus(&tr->max_buffer);
12883efb 5206#endif
60303ed3 5207
5079f326
Z
5208 mutex_unlock(&trace_types_lock);
5209
e1e232ca
SR
5210 return 0;
5211}
5212
5213static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5214 size_t cnt, loff_t *fpos)
5215{
5216 struct seq_file *m = filp->private_data;
5217 struct trace_array *tr = m->private;
5218 char buf[64];
5219 const char *clockstr;
5220 int ret;
5221
5222 if (cnt >= sizeof(buf))
5223 return -EINVAL;
5224
5225 if (copy_from_user(&buf, ubuf, cnt))
5226 return -EFAULT;
5227
5228 buf[cnt] = 0;
5229
5230 clockstr = strstrip(buf);
5231
5232 ret = tracing_set_clock(tr, clockstr);
5233 if (ret)
5234 return ret;
5235
5079f326
Z
5236 *fpos += cnt;
5237
5238 return cnt;
5239}
5240
13f16d20
LZ
5241static int tracing_clock_open(struct inode *inode, struct file *file)
5242{
7b85af63
SRRH
5243 struct trace_array *tr = inode->i_private;
5244 int ret;
5245
13f16d20
LZ
5246 if (tracing_disabled)
5247 return -ENODEV;
2b6080f2 5248
7b85af63
SRRH
5249 if (trace_array_get(tr))
5250 return -ENODEV;
5251
5252 ret = single_open(file, tracing_clock_show, inode->i_private);
5253 if (ret < 0)
5254 trace_array_put(tr);
5255
5256 return ret;
13f16d20
LZ
5257}
5258
6de58e62
SRRH
5259struct ftrace_buffer_info {
5260 struct trace_iterator iter;
5261 void *spare;
5262 unsigned int read;
5263};
5264
debdd57f
HT
5265#ifdef CONFIG_TRACER_SNAPSHOT
5266static int tracing_snapshot_open(struct inode *inode, struct file *file)
5267{
6484c71c 5268 struct trace_array *tr = inode->i_private;
debdd57f 5269 struct trace_iterator *iter;
2b6080f2 5270 struct seq_file *m;
debdd57f
HT
5271 int ret = 0;
5272
ff451961
SRRH
5273 if (trace_array_get(tr) < 0)
5274 return -ENODEV;
5275
debdd57f 5276 if (file->f_mode & FMODE_READ) {
6484c71c 5277 iter = __tracing_open(inode, file, true);
debdd57f
HT
5278 if (IS_ERR(iter))
5279 ret = PTR_ERR(iter);
2b6080f2
SR
5280 } else {
5281 /* Writes still need the seq_file to hold the private data */
f77d09a3 5282 ret = -ENOMEM;
2b6080f2
SR
5283 m = kzalloc(sizeof(*m), GFP_KERNEL);
5284 if (!m)
f77d09a3 5285 goto out;
2b6080f2
SR
5286 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5287 if (!iter) {
5288 kfree(m);
f77d09a3 5289 goto out;
2b6080f2 5290 }
f77d09a3
AL
5291 ret = 0;
5292
ff451961 5293 iter->tr = tr;
6484c71c
ON
5294 iter->trace_buffer = &tr->max_buffer;
5295 iter->cpu_file = tracing_get_cpu(inode);
2b6080f2
SR
5296 m->private = iter;
5297 file->private_data = m;
debdd57f 5298 }
f77d09a3 5299out:
ff451961
SRRH
5300 if (ret < 0)
5301 trace_array_put(tr);
5302
debdd57f
HT
5303 return ret;
5304}
5305
5306static ssize_t
5307tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5308 loff_t *ppos)
5309{
2b6080f2
SR
5310 struct seq_file *m = filp->private_data;
5311 struct trace_iterator *iter = m->private;
5312 struct trace_array *tr = iter->tr;
debdd57f
HT
5313 unsigned long val;
5314 int ret;
5315
5316 ret = tracing_update_buffers();
5317 if (ret < 0)
5318 return ret;
5319
5320 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5321 if (ret)
5322 return ret;
5323
5324 mutex_lock(&trace_types_lock);
5325
2b6080f2 5326 if (tr->current_trace->use_max_tr) {
debdd57f
HT
5327 ret = -EBUSY;
5328 goto out;
5329 }
5330
5331 switch (val) {
5332 case 0:
f1affcaa
SRRH
5333 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5334 ret = -EINVAL;
5335 break;
debdd57f 5336 }
3209cff4
SRRH
5337 if (tr->allocated_snapshot)
5338 free_snapshot(tr);
debdd57f
HT
5339 break;
5340 case 1:
f1affcaa
SRRH
5341/* Only allow per-cpu swap if the ring buffer supports it */
5342#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5343 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5344 ret = -EINVAL;
5345 break;
5346 }
5347#endif
45ad21ca 5348 if (!tr->allocated_snapshot) {
3209cff4 5349 ret = alloc_snapshot(tr);
debdd57f
HT
5350 if (ret < 0)
5351 break;
debdd57f 5352 }
debdd57f
HT
5353 local_irq_disable();
5354 /* Now, we're going to swap */
f1affcaa 5355 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
ce9bae55 5356 update_max_tr(tr, current, smp_processor_id());
f1affcaa 5357 else
ce9bae55 5358 update_max_tr_single(tr, current, iter->cpu_file);
debdd57f
HT
5359 local_irq_enable();
5360 break;
5361 default:
45ad21ca 5362 if (tr->allocated_snapshot) {
f1affcaa
SRRH
5363 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5364 tracing_reset_online_cpus(&tr->max_buffer);
5365 else
5366 tracing_reset(&tr->max_buffer, iter->cpu_file);
5367 }
debdd57f
HT
5368 break;
5369 }
5370
5371 if (ret >= 0) {
5372 *ppos += cnt;
5373 ret = cnt;
5374 }
5375out:
5376 mutex_unlock(&trace_types_lock);
5377 return ret;
5378}
2b6080f2
SR
5379
5380static int tracing_snapshot_release(struct inode *inode, struct file *file)
5381{
5382 struct seq_file *m = file->private_data;
ff451961
SRRH
5383 int ret;
5384
5385 ret = tracing_release(inode, file);
2b6080f2
SR
5386
5387 if (file->f_mode & FMODE_READ)
ff451961 5388 return ret;
2b6080f2
SR
5389
5390 /* If write only, the seq_file is just a stub */
5391 if (m)
5392 kfree(m->private);
5393 kfree(m);
5394
5395 return 0;
5396}
5397
6de58e62
SRRH
5398static int tracing_buffers_open(struct inode *inode, struct file *filp);
5399static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5400 size_t count, loff_t *ppos);
5401static int tracing_buffers_release(struct inode *inode, struct file *file);
5402static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5403 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5404
5405static int snapshot_raw_open(struct inode *inode, struct file *filp)
5406{
5407 struct ftrace_buffer_info *info;
5408 int ret;
5409
5410 ret = tracing_buffers_open(inode, filp);
5411 if (ret < 0)
5412 return ret;
5413
5414 info = filp->private_data;
5415
5416 if (info->iter.trace->use_max_tr) {
5417 tracing_buffers_release(inode, filp);
5418 return -EBUSY;
5419 }
5420
5421 info->iter.snapshot = true;
5422 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5423
5424 return ret;
5425}
5426
debdd57f
HT
5427#endif /* CONFIG_TRACER_SNAPSHOT */
5428
5429
6508fa76
SF
5430static const struct file_operations tracing_thresh_fops = {
5431 .open = tracing_open_generic,
5432 .read = tracing_thresh_read,
5433 .write = tracing_thresh_write,
5434 .llseek = generic_file_llseek,
5435};
5436
5e2336a0 5437static const struct file_operations tracing_max_lat_fops = {
4bf39a94
IM
5438 .open = tracing_open_generic,
5439 .read = tracing_max_lat_read,
5440 .write = tracing_max_lat_write,
b444786f 5441 .llseek = generic_file_llseek,
bc0c38d1
SR
5442};
5443
5e2336a0 5444static const struct file_operations set_tracer_fops = {
4bf39a94
IM
5445 .open = tracing_open_generic,
5446 .read = tracing_set_trace_read,
5447 .write = tracing_set_trace_write,
b444786f 5448 .llseek = generic_file_llseek,
bc0c38d1
SR
5449};
5450
5e2336a0 5451static const struct file_operations tracing_pipe_fops = {
4bf39a94 5452 .open = tracing_open_pipe,
2a2cc8f7 5453 .poll = tracing_poll_pipe,
4bf39a94 5454 .read = tracing_read_pipe,
3c56819b 5455 .splice_read = tracing_splice_read_pipe,
4bf39a94 5456 .release = tracing_release_pipe,
b444786f 5457 .llseek = no_llseek,
b3806b43
SR
5458};
5459
5e2336a0 5460static const struct file_operations tracing_entries_fops = {
0bc392ee 5461 .open = tracing_open_generic_tr,
a98a3c3f
SR
5462 .read = tracing_entries_read,
5463 .write = tracing_entries_write,
b444786f 5464 .llseek = generic_file_llseek,
0bc392ee 5465 .release = tracing_release_generic_tr,
a98a3c3f
SR
5466};
5467
f81ab074 5468static const struct file_operations tracing_total_entries_fops = {
7b85af63 5469 .open = tracing_open_generic_tr,
f81ab074
VN
5470 .read = tracing_total_entries_read,
5471 .llseek = generic_file_llseek,
7b85af63 5472 .release = tracing_release_generic_tr,
f81ab074
VN
5473};
5474
4f271a2a 5475static const struct file_operations tracing_free_buffer_fops = {
7b85af63 5476 .open = tracing_open_generic_tr,
4f271a2a
VN
5477 .write = tracing_free_buffer_write,
5478 .release = tracing_free_buffer_release,
5479};
5480
5e2336a0 5481static const struct file_operations tracing_mark_fops = {
7b85af63 5482 .open = tracing_open_generic_tr,
5bf9a1ee 5483 .write = tracing_mark_write,
b444786f 5484 .llseek = generic_file_llseek,
7b85af63 5485 .release = tracing_release_generic_tr,
5bf9a1ee
PP
5486};
5487
5079f326 5488static const struct file_operations trace_clock_fops = {
13f16d20
LZ
5489 .open = tracing_clock_open,
5490 .read = seq_read,
5491 .llseek = seq_lseek,
7b85af63 5492 .release = tracing_single_release_tr,
5079f326
Z
5493 .write = tracing_clock_write,
5494};
5495
debdd57f
HT
5496#ifdef CONFIG_TRACER_SNAPSHOT
5497static const struct file_operations snapshot_fops = {
5498 .open = tracing_snapshot_open,
5499 .read = seq_read,
5500 .write = tracing_snapshot_write,
098c879e 5501 .llseek = tracing_lseek,
2b6080f2 5502 .release = tracing_snapshot_release,
debdd57f 5503};
debdd57f 5504
6de58e62
SRRH
5505static const struct file_operations snapshot_raw_fops = {
5506 .open = snapshot_raw_open,
5507 .read = tracing_buffers_read,
5508 .release = tracing_buffers_release,
5509 .splice_read = tracing_buffers_splice_read,
5510 .llseek = no_llseek,
2cadf913
SR
5511};
5512
6de58e62
SRRH
5513#endif /* CONFIG_TRACER_SNAPSHOT */
5514
2cadf913
SR
5515static int tracing_buffers_open(struct inode *inode, struct file *filp)
5516{
46ef2be0 5517 struct trace_array *tr = inode->i_private;
2cadf913 5518 struct ftrace_buffer_info *info;
7b85af63 5519 int ret;
2cadf913
SR
5520
5521 if (tracing_disabled)
5522 return -ENODEV;
5523
7b85af63
SRRH
5524 if (trace_array_get(tr) < 0)
5525 return -ENODEV;
5526
2cadf913 5527 info = kzalloc(sizeof(*info), GFP_KERNEL);
7b85af63
SRRH
5528 if (!info) {
5529 trace_array_put(tr);
2cadf913 5530 return -ENOMEM;
7b85af63 5531 }
2cadf913 5532
a695cb58
SRRH
5533 mutex_lock(&trace_types_lock);
5534
cc60cdc9 5535 info->iter.tr = tr;
46ef2be0 5536 info->iter.cpu_file = tracing_get_cpu(inode);
b627344f 5537 info->iter.trace = tr->current_trace;
12883efb 5538 info->iter.trace_buffer = &tr->trace_buffer;
cc60cdc9 5539 info->spare = NULL;
2cadf913 5540 /* Force reading ring buffer for first read */
cc60cdc9 5541 info->read = (unsigned int)-1;
2cadf913
SR
5542
5543 filp->private_data = info;
5544
cf6ab6d9
SRRH
5545 tr->current_trace->ref++;
5546
a695cb58
SRRH
5547 mutex_unlock(&trace_types_lock);
5548
7b85af63
SRRH
5549 ret = nonseekable_open(inode, filp);
5550 if (ret < 0)
5551 trace_array_put(tr);
5552
5553 return ret;
2cadf913
SR
5554}
5555
cc60cdc9
SR
5556static unsigned int
5557tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5558{
5559 struct ftrace_buffer_info *info = filp->private_data;
5560 struct trace_iterator *iter = &info->iter;
5561
5562 return trace_poll(iter, filp, poll_table);
5563}
5564
2cadf913
SR
5565static ssize_t
5566tracing_buffers_read(struct file *filp, char __user *ubuf,
5567 size_t count, loff_t *ppos)
5568{
5569 struct ftrace_buffer_info *info = filp->private_data;
cc60cdc9 5570 struct trace_iterator *iter = &info->iter;
2cadf913 5571 ssize_t ret;
6de58e62 5572 ssize_t size;
2cadf913 5573
2dc5d12b
SR
5574 if (!count)
5575 return 0;
5576
6de58e62 5577#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5578 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5579 return -EBUSY;
6de58e62
SRRH
5580#endif
5581
ddd538f3 5582 if (!info->spare)
12883efb
SRRH
5583 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5584 iter->cpu_file);
ddd538f3 5585 if (!info->spare)
d716ff71 5586 return -ENOMEM;
ddd538f3 5587
2cadf913
SR
5588 /* Do we have previous read data to read? */
5589 if (info->read < PAGE_SIZE)
5590 goto read;
5591
b627344f 5592 again:
cc60cdc9 5593 trace_access_lock(iter->cpu_file);
12883efb 5594 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
2cadf913
SR
5595 &info->spare,
5596 count,
cc60cdc9
SR
5597 iter->cpu_file, 0);
5598 trace_access_unlock(iter->cpu_file);
2cadf913 5599
b627344f
SR
5600 if (ret < 0) {
5601 if (trace_empty(iter)) {
d716ff71
SRRH
5602 if ((filp->f_flags & O_NONBLOCK))
5603 return -EAGAIN;
5604
e30f53aa 5605 ret = wait_on_pipe(iter, false);
d716ff71
SRRH
5606 if (ret)
5607 return ret;
5608
b627344f
SR
5609 goto again;
5610 }
d716ff71 5611 return 0;
b627344f 5612 }
436fc280 5613
436fc280 5614 info->read = 0;
b627344f 5615 read:
2cadf913
SR
5616 size = PAGE_SIZE - info->read;
5617 if (size > count)
5618 size = count;
5619
5620 ret = copy_to_user(ubuf, info->spare + info->read, size);
d716ff71
SRRH
5621 if (ret == size)
5622 return -EFAULT;
5623
2dc5d12b
SR
5624 size -= ret;
5625
2cadf913
SR
5626 *ppos += size;
5627 info->read += size;
5628
5629 return size;
5630}
5631
5632static int tracing_buffers_release(struct inode *inode, struct file *file)
5633{
5634 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5635 struct trace_iterator *iter = &info->iter;
2cadf913 5636
a695cb58
SRRH
5637 mutex_lock(&trace_types_lock);
5638
cf6ab6d9
SRRH
5639 iter->tr->current_trace->ref--;
5640
ff451961 5641 __trace_array_put(iter->tr);
2cadf913 5642
ddd538f3 5643 if (info->spare)
12883efb 5644 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
2cadf913
SR
5645 kfree(info);
5646
a695cb58
SRRH
5647 mutex_unlock(&trace_types_lock);
5648
2cadf913
SR
5649 return 0;
5650}
5651
5652struct buffer_ref {
5653 struct ring_buffer *buffer;
5654 void *page;
5655 int ref;
5656};
5657
5658static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5659 struct pipe_buffer *buf)
5660{
5661 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5662
5663 if (--ref->ref)
5664 return;
5665
5666 ring_buffer_free_read_page(ref->buffer, ref->page);
5667 kfree(ref);
5668 buf->private = 0;
5669}
5670
2cadf913
SR
5671static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5672 struct pipe_buffer *buf)
5673{
5674 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5675
5676 ref->ref++;
5677}
5678
5679/* Pipe buffer operations for a buffer. */
28dfef8f 5680static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2cadf913 5681 .can_merge = 0,
2cadf913
SR
5682 .confirm = generic_pipe_buf_confirm,
5683 .release = buffer_pipe_buf_release,
d55cb6cf 5684 .steal = generic_pipe_buf_steal,
2cadf913
SR
5685 .get = buffer_pipe_buf_get,
5686};
5687
5688/*
5689 * Callback from splice_to_pipe(), if we need to release some pages
5690 * at the end of the spd in case we error'ed out in filling the pipe.
5691 */
5692static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5693{
5694 struct buffer_ref *ref =
5695 (struct buffer_ref *)spd->partial[i].private;
5696
5697 if (--ref->ref)
5698 return;
5699
5700 ring_buffer_free_read_page(ref->buffer, ref->page);
5701 kfree(ref);
5702 spd->partial[i].private = 0;
5703}
5704
5705static ssize_t
5706tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5707 struct pipe_inode_info *pipe, size_t len,
5708 unsigned int flags)
5709{
5710 struct ftrace_buffer_info *info = file->private_data;
cc60cdc9 5711 struct trace_iterator *iter = &info->iter;
35f3d14d
JA
5712 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5713 struct page *pages_def[PIPE_DEF_BUFFERS];
2cadf913 5714 struct splice_pipe_desc spd = {
35f3d14d
JA
5715 .pages = pages_def,
5716 .partial = partial_def,
047fe360 5717 .nr_pages_max = PIPE_DEF_BUFFERS,
2cadf913
SR
5718 .flags = flags,
5719 .ops = &buffer_pipe_buf_ops,
5720 .spd_release = buffer_spd_release,
5721 };
5722 struct buffer_ref *ref;
93459c6c 5723 int entries, size, i;
07906da7 5724 ssize_t ret = 0;
2cadf913 5725
6de58e62 5726#ifdef CONFIG_TRACER_MAX_TRACE
d716ff71
SRRH
5727 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5728 return -EBUSY;
6de58e62
SRRH
5729#endif
5730
d716ff71
SRRH
5731 if (splice_grow_spd(pipe, &spd))
5732 return -ENOMEM;
35f3d14d 5733
d716ff71
SRRH
5734 if (*ppos & (PAGE_SIZE - 1))
5735 return -EINVAL;
93cfb3c9
LJ
5736
5737 if (len & (PAGE_SIZE - 1)) {
d716ff71
SRRH
5738 if (len < PAGE_SIZE)
5739 return -EINVAL;
93cfb3c9
LJ
5740 len &= PAGE_MASK;
5741 }
5742
cc60cdc9
SR
5743 again:
5744 trace_access_lock(iter->cpu_file);
12883efb 5745 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
93459c6c 5746
a786c06d 5747 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
2cadf913
SR
5748 struct page *page;
5749 int r;
5750
5751 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
07906da7
RV
5752 if (!ref) {
5753 ret = -ENOMEM;
2cadf913 5754 break;
07906da7 5755 }
2cadf913 5756
7267fa68 5757 ref->ref = 1;
12883efb 5758 ref->buffer = iter->trace_buffer->buffer;
cc60cdc9 5759 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2cadf913 5760 if (!ref->page) {
07906da7 5761 ret = -ENOMEM;
2cadf913
SR
5762 kfree(ref);
5763 break;
5764 }
5765
5766 r = ring_buffer_read_page(ref->buffer, &ref->page,
cc60cdc9 5767 len, iter->cpu_file, 1);
2cadf913 5768 if (r < 0) {
7ea59064 5769 ring_buffer_free_read_page(ref->buffer, ref->page);
2cadf913
SR
5770 kfree(ref);
5771 break;
5772 }
5773
5774 /*
5775 * zero out any left over data, this is going to
5776 * user land.
5777 */
5778 size = ring_buffer_page_len(ref->page);
5779 if (size < PAGE_SIZE)
5780 memset(ref->page + size, 0, PAGE_SIZE - size);
5781
5782 page = virt_to_page(ref->page);
5783
5784 spd.pages[i] = page;
5785 spd.partial[i].len = PAGE_SIZE;
5786 spd.partial[i].offset = 0;
5787 spd.partial[i].private = (unsigned long)ref;
5788 spd.nr_pages++;
93cfb3c9 5789 *ppos += PAGE_SIZE;
93459c6c 5790
12883efb 5791 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
2cadf913
SR
5792 }
5793
cc60cdc9 5794 trace_access_unlock(iter->cpu_file);
2cadf913
SR
5795 spd.nr_pages = i;
5796
5797 /* did we read anything? */
5798 if (!spd.nr_pages) {
07906da7 5799 if (ret)
d716ff71
SRRH
5800 return ret;
5801
5802 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5803 return -EAGAIN;
07906da7 5804
e30f53aa 5805 ret = wait_on_pipe(iter, true);
8b8b3683 5806 if (ret)
d716ff71 5807 return ret;
e30f53aa 5808
cc60cdc9 5809 goto again;
2cadf913
SR
5810 }
5811
5812 ret = splice_to_pipe(pipe, &spd);
047fe360 5813 splice_shrink_spd(&spd);
6de58e62 5814
2cadf913
SR
5815 return ret;
5816}
5817
5818static const struct file_operations tracing_buffers_fops = {
5819 .open = tracing_buffers_open,
5820 .read = tracing_buffers_read,
cc60cdc9 5821 .poll = tracing_buffers_poll,
2cadf913
SR
5822 .release = tracing_buffers_release,
5823 .splice_read = tracing_buffers_splice_read,
5824 .llseek = no_llseek,
5825};
5826
c8d77183
SR
5827static ssize_t
5828tracing_stats_read(struct file *filp, char __user *ubuf,
5829 size_t count, loff_t *ppos)
5830{
4d3435b8
ON
5831 struct inode *inode = file_inode(filp);
5832 struct trace_array *tr = inode->i_private;
12883efb 5833 struct trace_buffer *trace_buf = &tr->trace_buffer;
4d3435b8 5834 int cpu = tracing_get_cpu(inode);
c8d77183
SR
5835 struct trace_seq *s;
5836 unsigned long cnt;
c64e148a
VN
5837 unsigned long long t;
5838 unsigned long usec_rem;
c8d77183 5839
e4f2d10f 5840 s = kmalloc(sizeof(*s), GFP_KERNEL);
c8d77183 5841 if (!s)
a646365c 5842 return -ENOMEM;
c8d77183
SR
5843
5844 trace_seq_init(s);
5845
12883efb 5846 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5847 trace_seq_printf(s, "entries: %ld\n", cnt);
5848
12883efb 5849 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5850 trace_seq_printf(s, "overrun: %ld\n", cnt);
5851
12883efb 5852 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
c8d77183
SR
5853 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5854
12883efb 5855 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
c64e148a
VN
5856 trace_seq_printf(s, "bytes: %ld\n", cnt);
5857
58e8eedf 5858 if (trace_clocks[tr->clock_id].in_ns) {
11043d8b 5859 /* local or global for trace_clock */
12883efb 5860 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
11043d8b
YY
5861 usec_rem = do_div(t, USEC_PER_SEC);
5862 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5863 t, usec_rem);
5864
12883efb 5865 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b
YY
5866 usec_rem = do_div(t, USEC_PER_SEC);
5867 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5868 } else {
5869 /* counter or tsc mode for trace_clock */
5870 trace_seq_printf(s, "oldest event ts: %llu\n",
12883efb 5871 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
c64e148a 5872
11043d8b 5873 trace_seq_printf(s, "now ts: %llu\n",
12883efb 5874 ring_buffer_time_stamp(trace_buf->buffer, cpu));
11043d8b 5875 }
c64e148a 5876
12883efb 5877 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
884bfe89
SP
5878 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5879
12883efb 5880 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
ad964704
SRRH
5881 trace_seq_printf(s, "read events: %ld\n", cnt);
5882
5ac48378
SRRH
5883 count = simple_read_from_buffer(ubuf, count, ppos,
5884 s->buffer, trace_seq_used(s));
c8d77183
SR
5885
5886 kfree(s);
5887
5888 return count;
5889}
5890
5891static const struct file_operations tracing_stats_fops = {
4d3435b8 5892 .open = tracing_open_generic_tr,
c8d77183 5893 .read = tracing_stats_read,
b444786f 5894 .llseek = generic_file_llseek,
4d3435b8 5895 .release = tracing_release_generic_tr,
c8d77183
SR
5896};
5897
bc0c38d1
SR
5898#ifdef CONFIG_DYNAMIC_FTRACE
5899
b807c3d0
SR
5900int __weak ftrace_arch_read_dyn_info(char *buf, int size)
5901{
5902 return 0;
5903}
5904
bc0c38d1 5905static ssize_t
b807c3d0 5906tracing_read_dyn_info(struct file *filp, char __user *ubuf,
bc0c38d1
SR
5907 size_t cnt, loff_t *ppos)
5908{
a26a2a27
SR
5909 static char ftrace_dyn_info_buffer[1024];
5910 static DEFINE_MUTEX(dyn_info_mutex);
bc0c38d1 5911 unsigned long *p = filp->private_data;
b807c3d0 5912 char *buf = ftrace_dyn_info_buffer;
a26a2a27 5913 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
bc0c38d1
SR
5914 int r;
5915
b807c3d0
SR
5916 mutex_lock(&dyn_info_mutex);
5917 r = sprintf(buf, "%ld ", *p);
4bf39a94 5918
a26a2a27 5919 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
b807c3d0
SR
5920 buf[r++] = '\n';
5921
5922 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5923
5924 mutex_unlock(&dyn_info_mutex);
5925
5926 return r;
bc0c38d1
SR
5927}
5928
5e2336a0 5929static const struct file_operations tracing_dyn_info_fops = {
4bf39a94 5930 .open = tracing_open_generic,
b807c3d0 5931 .read = tracing_read_dyn_info,
b444786f 5932 .llseek = generic_file_llseek,
bc0c38d1 5933};
77fd5c15 5934#endif /* CONFIG_DYNAMIC_FTRACE */
bc0c38d1 5935
77fd5c15
SRRH
5936#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5937static void
5938ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5939{
5940 tracing_snapshot();
5941}
bc0c38d1 5942
77fd5c15
SRRH
5943static void
5944ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
bc0c38d1 5945{
77fd5c15
SRRH
5946 unsigned long *count = (long *)data;
5947
5948 if (!*count)
5949 return;
bc0c38d1 5950
77fd5c15
SRRH
5951 if (*count != -1)
5952 (*count)--;
5953
5954 tracing_snapshot();
5955}
5956
5957static int
5958ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5959 struct ftrace_probe_ops *ops, void *data)
5960{
5961 long count = (long)data;
5962
5963 seq_printf(m, "%ps:", (void *)ip);
5964
fa6f0cc7 5965 seq_puts(m, "snapshot");
77fd5c15
SRRH
5966
5967 if (count == -1)
fa6f0cc7 5968 seq_puts(m, ":unlimited\n");
77fd5c15
SRRH
5969 else
5970 seq_printf(m, ":count=%ld\n", count);
5971
5972 return 0;
5973}
5974
5975static struct ftrace_probe_ops snapshot_probe_ops = {
5976 .func = ftrace_snapshot,
5977 .print = ftrace_snapshot_print,
5978};
5979
5980static struct ftrace_probe_ops snapshot_count_probe_ops = {
5981 .func = ftrace_count_snapshot,
5982 .print = ftrace_snapshot_print,
5983};
5984
5985static int
5986ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5987 char *glob, char *cmd, char *param, int enable)
5988{
5989 struct ftrace_probe_ops *ops;
5990 void *count = (void *)-1;
5991 char *number;
5992 int ret;
5993
5994 /* hash funcs only work with set_ftrace_filter */
5995 if (!enable)
5996 return -EINVAL;
5997
5998 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5999
6000 if (glob[0] == '!') {
6001 unregister_ftrace_function_probe_func(glob+1, ops);
6002 return 0;
6003 }
6004
6005 if (!param)
6006 goto out_reg;
6007
6008 number = strsep(&param, ":");
6009
6010 if (!strlen(number))
6011 goto out_reg;
6012
6013 /*
6014 * We use the callback data field (which is a pointer)
6015 * as our counter.
6016 */
6017 ret = kstrtoul(number, 0, (unsigned long *)&count);
6018 if (ret)
6019 return ret;
6020
6021 out_reg:
6022 ret = register_ftrace_function_probe(glob, ops, count);
6023
6024 if (ret >= 0)
6025 alloc_snapshot(&global_trace);
6026
6027 return ret < 0 ? ret : 0;
6028}
6029
6030static struct ftrace_func_command ftrace_snapshot_cmd = {
6031 .name = "snapshot",
6032 .func = ftrace_trace_snapshot_callback,
6033};
6034
38de93ab 6035static __init int register_snapshot_cmd(void)
77fd5c15
SRRH
6036{
6037 return register_ftrace_command(&ftrace_snapshot_cmd);
6038}
6039#else
38de93ab 6040static inline __init int register_snapshot_cmd(void) { return 0; }
77fd5c15 6041#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
bc0c38d1 6042
7eeafbca 6043static struct dentry *tracing_get_dentry(struct trace_array *tr)
bc0c38d1 6044{
8434dc93
SRRH
6045 if (WARN_ON(!tr->dir))
6046 return ERR_PTR(-ENODEV);
6047
6048 /* Top directory uses NULL as the parent */
6049 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6050 return NULL;
6051
6052 /* All sub buffers have a descriptor */
2b6080f2 6053 return tr->dir;
bc0c38d1
SR
6054}
6055
2b6080f2 6056static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
b04cc6b1 6057{
b04cc6b1
FW
6058 struct dentry *d_tracer;
6059
2b6080f2
SR
6060 if (tr->percpu_dir)
6061 return tr->percpu_dir;
b04cc6b1 6062
7eeafbca 6063 d_tracer = tracing_get_dentry(tr);
14a5ae40 6064 if (IS_ERR(d_tracer))
b04cc6b1
FW
6065 return NULL;
6066
8434dc93 6067 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
b04cc6b1 6068
2b6080f2 6069 WARN_ONCE(!tr->percpu_dir,
8434dc93 6070 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
b04cc6b1 6071
2b6080f2 6072 return tr->percpu_dir;
b04cc6b1
FW
6073}
6074
649e9c70
ON
6075static struct dentry *
6076trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6077 void *data, long cpu, const struct file_operations *fops)
6078{
6079 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6080
6081 if (ret) /* See tracing_get_cpu() */
6082 ret->d_inode->i_cdev = (void *)(cpu + 1);
6083 return ret;
6084}
6085
2b6080f2 6086static void
8434dc93 6087tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
b04cc6b1 6088{
2b6080f2 6089 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
5452af66 6090 struct dentry *d_cpu;
dd49a38c 6091 char cpu_dir[30]; /* 30 characters should be more than enough */
b04cc6b1 6092
0a3d7ce7
NK
6093 if (!d_percpu)
6094 return;
6095
dd49a38c 6096 snprintf(cpu_dir, 30, "cpu%ld", cpu);
8434dc93 6097 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
8656e7a2 6098 if (!d_cpu) {
8434dc93 6099 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
8656e7a2
FW
6100 return;
6101 }
b04cc6b1 6102
8656e7a2 6103 /* per cpu trace_pipe */
649e9c70 6104 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
15544209 6105 tr, cpu, &tracing_pipe_fops);
b04cc6b1
FW
6106
6107 /* per cpu trace */
649e9c70 6108 trace_create_cpu_file("trace", 0644, d_cpu,
6484c71c 6109 tr, cpu, &tracing_fops);
7f96f93f 6110
649e9c70 6111 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
46ef2be0 6112 tr, cpu, &tracing_buffers_fops);
7f96f93f 6113
649e9c70 6114 trace_create_cpu_file("stats", 0444, d_cpu,
4d3435b8 6115 tr, cpu, &tracing_stats_fops);
438ced17 6116
649e9c70 6117 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
0bc392ee 6118 tr, cpu, &tracing_entries_fops);
f1affcaa
SRRH
6119
6120#ifdef CONFIG_TRACER_SNAPSHOT
649e9c70 6121 trace_create_cpu_file("snapshot", 0644, d_cpu,
6484c71c 6122 tr, cpu, &snapshot_fops);
6de58e62 6123
649e9c70 6124 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
46ef2be0 6125 tr, cpu, &snapshot_raw_fops);
f1affcaa 6126#endif
b04cc6b1
FW
6127}
6128
60a11774
SR
6129#ifdef CONFIG_FTRACE_SELFTEST
6130/* Let selftest have access to static functions in this file */
6131#include "trace_selftest.c"
6132#endif
6133
577b785f
SR
6134struct trace_option_dentry {
6135 struct tracer_opt *opt;
6136 struct tracer_flags *flags;
2b6080f2 6137 struct trace_array *tr;
577b785f
SR
6138 struct dentry *entry;
6139};
6140
6141static ssize_t
6142trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6143 loff_t *ppos)
6144{
6145 struct trace_option_dentry *topt = filp->private_data;
6146 char *buf;
6147
6148 if (topt->flags->val & topt->opt->bit)
6149 buf = "1\n";
6150 else
6151 buf = "0\n";
6152
6153 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6154}
6155
6156static ssize_t
6157trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6158 loff_t *ppos)
6159{
6160 struct trace_option_dentry *topt = filp->private_data;
6161 unsigned long val;
577b785f
SR
6162 int ret;
6163
22fe9b54
PH
6164 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6165 if (ret)
577b785f
SR
6166 return ret;
6167
8d18eaaf
LZ
6168 if (val != 0 && val != 1)
6169 return -EINVAL;
577b785f 6170
8d18eaaf 6171 if (!!(topt->flags->val & topt->opt->bit) != val) {
577b785f 6172 mutex_lock(&trace_types_lock);
8c1a49ae 6173 ret = __set_tracer_option(topt->tr, topt->flags,
c757bea9 6174 topt->opt, !val);
577b785f
SR
6175 mutex_unlock(&trace_types_lock);
6176 if (ret)
6177 return ret;
577b785f
SR
6178 }
6179
6180 *ppos += cnt;
6181
6182 return cnt;
6183}
6184
6185
6186static const struct file_operations trace_options_fops = {
6187 .open = tracing_open_generic,
6188 .read = trace_options_read,
6189 .write = trace_options_write,
b444786f 6190 .llseek = generic_file_llseek,
577b785f
SR
6191};
6192
a8259075
SR
6193static ssize_t
6194trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6195 loff_t *ppos)
6196{
6197 long index = (long)filp->private_data;
6198 char *buf;
6199
6200 if (trace_flags & (1 << index))
6201 buf = "1\n";
6202 else
6203 buf = "0\n";
6204
6205 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6206}
6207
6208static ssize_t
6209trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6210 loff_t *ppos)
6211{
2b6080f2 6212 struct trace_array *tr = &global_trace;
a8259075 6213 long index = (long)filp->private_data;
a8259075
SR
6214 unsigned long val;
6215 int ret;
6216
22fe9b54
PH
6217 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6218 if (ret)
a8259075
SR
6219 return ret;
6220
f2d84b65 6221 if (val != 0 && val != 1)
a8259075 6222 return -EINVAL;
69d34da2
SRRH
6223
6224 mutex_lock(&trace_types_lock);
2b6080f2 6225 ret = set_tracer_flag(tr, 1 << index, val);
69d34da2 6226 mutex_unlock(&trace_types_lock);
a8259075 6227
613f04a0
SRRH
6228 if (ret < 0)
6229 return ret;
6230
a8259075
SR
6231 *ppos += cnt;
6232
6233 return cnt;
6234}
6235
a8259075
SR
6236static const struct file_operations trace_options_core_fops = {
6237 .open = tracing_open_generic,
6238 .read = trace_options_core_read,
6239 .write = trace_options_core_write,
b444786f 6240 .llseek = generic_file_llseek,
a8259075
SR
6241};
6242
5452af66 6243struct dentry *trace_create_file(const char *name,
f4ae40a6 6244 umode_t mode,
5452af66
FW
6245 struct dentry *parent,
6246 void *data,
6247 const struct file_operations *fops)
6248{
6249 struct dentry *ret;
6250
8434dc93 6251 ret = tracefs_create_file(name, mode, parent, data, fops);
5452af66 6252 if (!ret)
8434dc93 6253 pr_warning("Could not create tracefs '%s' entry\n", name);
5452af66
FW
6254
6255 return ret;
6256}
6257
6258
2b6080f2 6259static struct dentry *trace_options_init_dentry(struct trace_array *tr)
a8259075
SR
6260{
6261 struct dentry *d_tracer;
a8259075 6262
2b6080f2
SR
6263 if (tr->options)
6264 return tr->options;
a8259075 6265
7eeafbca 6266 d_tracer = tracing_get_dentry(tr);
14a5ae40 6267 if (IS_ERR(d_tracer))
a8259075
SR
6268 return NULL;
6269
8434dc93 6270 tr->options = tracefs_create_dir("options", d_tracer);
2b6080f2 6271 if (!tr->options) {
8434dc93 6272 pr_warning("Could not create tracefs directory 'options'\n");
a8259075
SR
6273 return NULL;
6274 }
6275
2b6080f2 6276 return tr->options;
a8259075
SR
6277}
6278
577b785f 6279static void
2b6080f2
SR
6280create_trace_option_file(struct trace_array *tr,
6281 struct trace_option_dentry *topt,
577b785f
SR
6282 struct tracer_flags *flags,
6283 struct tracer_opt *opt)
6284{
6285 struct dentry *t_options;
577b785f 6286
2b6080f2 6287 t_options = trace_options_init_dentry(tr);
577b785f
SR
6288 if (!t_options)
6289 return;
6290
6291 topt->flags = flags;
6292 topt->opt = opt;
2b6080f2 6293 topt->tr = tr;
577b785f 6294
5452af66 6295 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
577b785f
SR
6296 &trace_options_fops);
6297
577b785f
SR
6298}
6299
6300static struct trace_option_dentry *
2b6080f2 6301create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
577b785f
SR
6302{
6303 struct trace_option_dentry *topts;
6304 struct tracer_flags *flags;
6305 struct tracer_opt *opts;
6306 int cnt;
6307
6308 if (!tracer)
6309 return NULL;
6310
6311 flags = tracer->flags;
6312
6313 if (!flags || !flags->opts)
6314 return NULL;
6315
6316 opts = flags->opts;
6317
6318 for (cnt = 0; opts[cnt].name; cnt++)
6319 ;
6320
0cfe8245 6321 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
577b785f
SR
6322 if (!topts)
6323 return NULL;
6324
6325 for (cnt = 0; opts[cnt].name; cnt++)
2b6080f2 6326 create_trace_option_file(tr, &topts[cnt], flags,
577b785f
SR
6327 &opts[cnt]);
6328
6329 return topts;
6330}
6331
6332static void
6333destroy_trace_option_files(struct trace_option_dentry *topts)
6334{
6335 int cnt;
6336
6337 if (!topts)
6338 return;
6339
3f4d8f78 6340 for (cnt = 0; topts[cnt].opt; cnt++)
8434dc93 6341 tracefs_remove(topts[cnt].entry);
577b785f
SR
6342
6343 kfree(topts);
6344}
6345
a8259075 6346static struct dentry *
2b6080f2
SR
6347create_trace_option_core_file(struct trace_array *tr,
6348 const char *option, long index)
a8259075
SR
6349{
6350 struct dentry *t_options;
a8259075 6351
2b6080f2 6352 t_options = trace_options_init_dentry(tr);
a8259075
SR
6353 if (!t_options)
6354 return NULL;
6355
5452af66 6356 return trace_create_file(option, 0644, t_options, (void *)index,
a8259075 6357 &trace_options_core_fops);
a8259075
SR
6358}
6359
2b6080f2 6360static __init void create_trace_options_dir(struct trace_array *tr)
a8259075
SR
6361{
6362 struct dentry *t_options;
a8259075
SR
6363 int i;
6364
2b6080f2 6365 t_options = trace_options_init_dentry(tr);
a8259075
SR
6366 if (!t_options)
6367 return;
6368
5452af66 6369 for (i = 0; trace_options[i]; i++)
2b6080f2 6370 create_trace_option_core_file(tr, trace_options[i], i);
a8259075
SR
6371}
6372
499e5470
SR
6373static ssize_t
6374rb_simple_read(struct file *filp, char __user *ubuf,
6375 size_t cnt, loff_t *ppos)
6376{
348f0fc2 6377 struct trace_array *tr = filp->private_data;
499e5470
SR
6378 char buf[64];
6379 int r;
6380
10246fa3 6381 r = tracer_tracing_is_on(tr);
499e5470
SR
6382 r = sprintf(buf, "%d\n", r);
6383
6384 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6385}
6386
6387static ssize_t
6388rb_simple_write(struct file *filp, const char __user *ubuf,
6389 size_t cnt, loff_t *ppos)
6390{
348f0fc2 6391 struct trace_array *tr = filp->private_data;
12883efb 6392 struct ring_buffer *buffer = tr->trace_buffer.buffer;
499e5470
SR
6393 unsigned long val;
6394 int ret;
6395
6396 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6397 if (ret)
6398 return ret;
6399
6400 if (buffer) {
2df8f8a6
SR
6401 mutex_lock(&trace_types_lock);
6402 if (val) {
10246fa3 6403 tracer_tracing_on(tr);
2b6080f2
SR
6404 if (tr->current_trace->start)
6405 tr->current_trace->start(tr);
2df8f8a6 6406 } else {
10246fa3 6407 tracer_tracing_off(tr);
2b6080f2
SR
6408 if (tr->current_trace->stop)
6409 tr->current_trace->stop(tr);
2df8f8a6
SR
6410 }
6411 mutex_unlock(&trace_types_lock);
499e5470
SR
6412 }
6413
6414 (*ppos)++;
6415
6416 return cnt;
6417}
6418
6419static const struct file_operations rb_simple_fops = {
7b85af63 6420 .open = tracing_open_generic_tr,
499e5470
SR
6421 .read = rb_simple_read,
6422 .write = rb_simple_write,
7b85af63 6423 .release = tracing_release_generic_tr,
499e5470
SR
6424 .llseek = default_llseek,
6425};
6426
277ba044
SR
6427struct dentry *trace_instance_dir;
6428
6429static void
8434dc93 6430init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
277ba044 6431
55034cd6
SRRH
6432static int
6433allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
277ba044
SR
6434{
6435 enum ring_buffer_flags rb_flags;
737223fb
SRRH
6436
6437 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6438
dced341b
SRRH
6439 buf->tr = tr;
6440
55034cd6
SRRH
6441 buf->buffer = ring_buffer_alloc(size, rb_flags);
6442 if (!buf->buffer)
6443 return -ENOMEM;
737223fb 6444
55034cd6
SRRH
6445 buf->data = alloc_percpu(struct trace_array_cpu);
6446 if (!buf->data) {
6447 ring_buffer_free(buf->buffer);
6448 return -ENOMEM;
6449 }
737223fb 6450
737223fb
SRRH
6451 /* Allocate the first page for all buffers */
6452 set_buffer_entries(&tr->trace_buffer,
6453 ring_buffer_size(tr->trace_buffer.buffer, 0));
6454
55034cd6
SRRH
6455 return 0;
6456}
737223fb 6457
55034cd6
SRRH
6458static int allocate_trace_buffers(struct trace_array *tr, int size)
6459{
6460 int ret;
737223fb 6461
55034cd6
SRRH
6462 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6463 if (ret)
6464 return ret;
737223fb 6465
55034cd6
SRRH
6466#ifdef CONFIG_TRACER_MAX_TRACE
6467 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6468 allocate_snapshot ? size : 1);
6469 if (WARN_ON(ret)) {
737223fb 6470 ring_buffer_free(tr->trace_buffer.buffer);
55034cd6
SRRH
6471 free_percpu(tr->trace_buffer.data);
6472 return -ENOMEM;
6473 }
6474 tr->allocated_snapshot = allocate_snapshot;
737223fb 6475
55034cd6
SRRH
6476 /*
6477 * Only the top level trace array gets its snapshot allocated
6478 * from the kernel command line.
6479 */
6480 allocate_snapshot = false;
737223fb 6481#endif
55034cd6 6482 return 0;
737223fb
SRRH
6483}
6484
f0b70cc4
SRRH
6485static void free_trace_buffer(struct trace_buffer *buf)
6486{
6487 if (buf->buffer) {
6488 ring_buffer_free(buf->buffer);
6489 buf->buffer = NULL;
6490 free_percpu(buf->data);
6491 buf->data = NULL;
6492 }
6493}
6494
23aaa3c1
SRRH
6495static void free_trace_buffers(struct trace_array *tr)
6496{
6497 if (!tr)
6498 return;
6499
f0b70cc4 6500 free_trace_buffer(&tr->trace_buffer);
23aaa3c1
SRRH
6501
6502#ifdef CONFIG_TRACER_MAX_TRACE
f0b70cc4 6503 free_trace_buffer(&tr->max_buffer);
23aaa3c1
SRRH
6504#endif
6505}
6506
eae47358 6507static int instance_mkdir(const char *name)
737223fb 6508{
277ba044
SR
6509 struct trace_array *tr;
6510 int ret;
277ba044
SR
6511
6512 mutex_lock(&trace_types_lock);
6513
6514 ret = -EEXIST;
6515 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6516 if (tr->name && strcmp(tr->name, name) == 0)
6517 goto out_unlock;
6518 }
6519
6520 ret = -ENOMEM;
6521 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6522 if (!tr)
6523 goto out_unlock;
6524
6525 tr->name = kstrdup(name, GFP_KERNEL);
6526 if (!tr->name)
6527 goto out_free_tr;
6528
ccfe9e42
AL
6529 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6530 goto out_free_tr;
6531
6532 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6533
277ba044
SR
6534 raw_spin_lock_init(&tr->start_lock);
6535
0b9b12c1
SRRH
6536 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6537
277ba044
SR
6538 tr->current_trace = &nop_trace;
6539
6540 INIT_LIST_HEAD(&tr->systems);
6541 INIT_LIST_HEAD(&tr->events);
6542
737223fb 6543 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
277ba044
SR
6544 goto out_free_tr;
6545
8434dc93 6546 tr->dir = tracefs_create_dir(name, trace_instance_dir);
277ba044
SR
6547 if (!tr->dir)
6548 goto out_free_tr;
6549
6550 ret = event_trace_add_tracer(tr->dir, tr);
609e85a7 6551 if (ret) {
8434dc93 6552 tracefs_remove_recursive(tr->dir);
277ba044 6553 goto out_free_tr;
609e85a7 6554 }
277ba044 6555
8434dc93 6556 init_tracer_tracefs(tr, tr->dir);
277ba044
SR
6557
6558 list_add(&tr->list, &ftrace_trace_arrays);
6559
6560 mutex_unlock(&trace_types_lock);
6561
6562 return 0;
6563
6564 out_free_tr:
23aaa3c1 6565 free_trace_buffers(tr);
ccfe9e42 6566 free_cpumask_var(tr->tracing_cpumask);
277ba044
SR
6567 kfree(tr->name);
6568 kfree(tr);
6569
6570 out_unlock:
6571 mutex_unlock(&trace_types_lock);
6572
6573 return ret;
6574
6575}
6576
eae47358 6577static int instance_rmdir(const char *name)
0c8916c3
SR
6578{
6579 struct trace_array *tr;
6580 int found = 0;
6581 int ret;
6582
6583 mutex_lock(&trace_types_lock);
6584
6585 ret = -ENODEV;
6586 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6587 if (tr->name && strcmp(tr->name, name) == 0) {
6588 found = 1;
6589 break;
6590 }
6591 }
6592 if (!found)
6593 goto out_unlock;
6594
a695cb58 6595 ret = -EBUSY;
cf6ab6d9 6596 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
a695cb58
SRRH
6597 goto out_unlock;
6598
0c8916c3
SR
6599 list_del(&tr->list);
6600
6b450d25 6601 tracing_set_nop(tr);
0c8916c3 6602 event_trace_del_tracer(tr);
591dffda 6603 ftrace_destroy_function_files(tr);
0c8916c3 6604 debugfs_remove_recursive(tr->dir);
a9fcaaac 6605 free_trace_buffers(tr);
0c8916c3
SR
6606
6607 kfree(tr->name);
6608 kfree(tr);
6609
6610 ret = 0;
6611
6612 out_unlock:
6613 mutex_unlock(&trace_types_lock);
6614
6615 return ret;
6616}
6617
277ba044
SR
6618static __init void create_trace_instances(struct dentry *d_tracer)
6619{
eae47358
SRRH
6620 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6621 instance_mkdir,
6622 instance_rmdir);
277ba044
SR
6623 if (WARN_ON(!trace_instance_dir))
6624 return;
277ba044
SR
6625}
6626
2b6080f2 6627static void
8434dc93 6628init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
2b6080f2 6629{
121aaee7 6630 int cpu;
2b6080f2 6631
607e2ea1
SRRH
6632 trace_create_file("available_tracers", 0444, d_tracer,
6633 tr, &show_traces_fops);
6634
6635 trace_create_file("current_tracer", 0644, d_tracer,
6636 tr, &set_tracer_fops);
6637
ccfe9e42
AL
6638 trace_create_file("tracing_cpumask", 0644, d_tracer,
6639 tr, &tracing_cpumask_fops);
6640
2b6080f2
SR
6641 trace_create_file("trace_options", 0644, d_tracer,
6642 tr, &tracing_iter_fops);
6643
6644 trace_create_file("trace", 0644, d_tracer,
6484c71c 6645 tr, &tracing_fops);
2b6080f2
SR
6646
6647 trace_create_file("trace_pipe", 0444, d_tracer,
15544209 6648 tr, &tracing_pipe_fops);
2b6080f2
SR
6649
6650 trace_create_file("buffer_size_kb", 0644, d_tracer,
0bc392ee 6651 tr, &tracing_entries_fops);
2b6080f2
SR
6652
6653 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6654 tr, &tracing_total_entries_fops);
6655
238ae93d 6656 trace_create_file("free_buffer", 0200, d_tracer,
2b6080f2
SR
6657 tr, &tracing_free_buffer_fops);
6658
6659 trace_create_file("trace_marker", 0220, d_tracer,
6660 tr, &tracing_mark_fops);
6661
6662 trace_create_file("trace_clock", 0644, d_tracer, tr,
6663 &trace_clock_fops);
6664
6665 trace_create_file("tracing_on", 0644, d_tracer,
6484c71c 6666 tr, &rb_simple_fops);
ce9bae55 6667
6d9b3fa5
SRRH
6668#ifdef CONFIG_TRACER_MAX_TRACE
6669 trace_create_file("tracing_max_latency", 0644, d_tracer,
6670 &tr->max_latency, &tracing_max_lat_fops);
6671#endif
6672
591dffda
SRRH
6673 if (ftrace_create_function_files(tr, d_tracer))
6674 WARN(1, "Could not allocate function filter files");
6675
ce9bae55
SRRH
6676#ifdef CONFIG_TRACER_SNAPSHOT
6677 trace_create_file("snapshot", 0644, d_tracer,
6484c71c 6678 tr, &snapshot_fops);
ce9bae55 6679#endif
121aaee7
SRRH
6680
6681 for_each_tracing_cpu(cpu)
8434dc93 6682 tracing_init_tracefs_percpu(tr, cpu);
121aaee7 6683
2b6080f2
SR
6684}
6685
f76180bc
SRRH
6686static struct vfsmount *trace_automount(void *ingore)
6687{
6688 struct vfsmount *mnt;
6689 struct file_system_type *type;
6690
6691 /*
6692 * To maintain backward compatibility for tools that mount
6693 * debugfs to get to the tracing facility, tracefs is automatically
6694 * mounted to the debugfs/tracing directory.
6695 */
6696 type = get_fs_type("tracefs");
6697 if (!type)
6698 return NULL;
6699 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6700 put_filesystem(type);
6701 if (IS_ERR(mnt))
6702 return NULL;
6703 mntget(mnt);
6704
6705 return mnt;
6706}
6707
7eeafbca
SRRH
6708/**
6709 * tracing_init_dentry - initialize top level trace array
6710 *
6711 * This is called when creating files or directories in the tracing
6712 * directory. It is called via fs_initcall() by any of the boot up code
6713 * and expects to return the dentry of the top level tracing directory.
6714 */
6715struct dentry *tracing_init_dentry(void)
6716{
6717 struct trace_array *tr = &global_trace;
6718
f76180bc 6719 /* The top level trace array uses NULL as parent */
7eeafbca 6720 if (tr->dir)
f76180bc 6721 return NULL;
7eeafbca
SRRH
6722
6723 if (WARN_ON(!debugfs_initialized()))
6724 return ERR_PTR(-ENODEV);
6725
f76180bc
SRRH
6726 /*
6727 * As there may still be users that expect the tracing
6728 * files to exist in debugfs/tracing, we must automount
6729 * the tracefs file system there, so older tools still
6730 * work with the newer kerenl.
6731 */
6732 tr->dir = debugfs_create_automount("tracing", NULL,
6733 trace_automount, NULL);
7eeafbca
SRRH
6734 if (!tr->dir) {
6735 pr_warn_once("Could not create debugfs directory 'tracing'\n");
6736 return ERR_PTR(-ENOMEM);
6737 }
6738
8434dc93 6739 return NULL;
7eeafbca
SRRH
6740}
6741
0c564a53
SRRH
6742extern struct trace_enum_map *__start_ftrace_enum_maps[];
6743extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6744
6745static void __init trace_enum_init(void)
6746{
3673b8e4
SRRH
6747 int len;
6748
6749 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
9828413d 6750 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
3673b8e4
SRRH
6751}
6752
6753#ifdef CONFIG_MODULES
6754static void trace_module_add_enums(struct module *mod)
6755{
6756 if (!mod->num_trace_enums)
6757 return;
6758
6759 /*
6760 * Modules with bad taint do not have events created, do
6761 * not bother with enums either.
6762 */
6763 if (trace_module_has_bad_taint(mod))
6764 return;
6765
9828413d 6766 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
3673b8e4
SRRH
6767}
6768
9828413d
SRRH
6769#ifdef CONFIG_TRACE_ENUM_MAP_FILE
6770static void trace_module_remove_enums(struct module *mod)
6771{
6772 union trace_enum_map_item *map;
6773 union trace_enum_map_item **last = &trace_enum_maps;
6774
6775 if (!mod->num_trace_enums)
6776 return;
6777
6778 mutex_lock(&trace_enum_mutex);
6779
6780 map = trace_enum_maps;
6781
6782 while (map) {
6783 if (map->head.mod == mod)
6784 break;
6785 map = trace_enum_jmp_to_tail(map);
6786 last = &map->tail.next;
6787 map = map->tail.next;
6788 }
6789 if (!map)
6790 goto out;
6791
6792 *last = trace_enum_jmp_to_tail(map)->tail.next;
6793 kfree(map);
6794 out:
6795 mutex_unlock(&trace_enum_mutex);
6796}
6797#else
6798static inline void trace_module_remove_enums(struct module *mod) { }
6799#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6800
3673b8e4
SRRH
6801static int trace_module_notify(struct notifier_block *self,
6802 unsigned long val, void *data)
6803{
6804 struct module *mod = data;
6805
6806 switch (val) {
6807 case MODULE_STATE_COMING:
6808 trace_module_add_enums(mod);
6809 break;
9828413d
SRRH
6810 case MODULE_STATE_GOING:
6811 trace_module_remove_enums(mod);
6812 break;
3673b8e4
SRRH
6813 }
6814
6815 return 0;
0c564a53
SRRH
6816}
6817
3673b8e4
SRRH
6818static struct notifier_block trace_module_nb = {
6819 .notifier_call = trace_module_notify,
6820 .priority = 0,
6821};
9828413d 6822#endif /* CONFIG_MODULES */
3673b8e4 6823
8434dc93 6824static __init int tracer_init_tracefs(void)
bc0c38d1
SR
6825{
6826 struct dentry *d_tracer;
bc0c38d1 6827
7e53bd42
LJ
6828 trace_access_lock_init();
6829
bc0c38d1 6830 d_tracer = tracing_init_dentry();
14a5ae40 6831 if (IS_ERR(d_tracer))
ed6f1c99 6832 return 0;
bc0c38d1 6833
8434dc93 6834 init_tracer_tracefs(&global_trace, d_tracer);
bc0c38d1 6835
5452af66 6836 trace_create_file("tracing_thresh", 0644, d_tracer,
6508fa76 6837 &global_trace, &tracing_thresh_fops);
a8259075 6838
339ae5d3 6839 trace_create_file("README", 0444, d_tracer,
5452af66
FW
6840 NULL, &tracing_readme_fops);
6841
69abe6a5
AP
6842 trace_create_file("saved_cmdlines", 0444, d_tracer,
6843 NULL, &tracing_saved_cmdlines_fops);
5bf9a1ee 6844
939c7a4f
YY
6845 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6846 NULL, &tracing_saved_cmdlines_size_fops);
6847
0c564a53
SRRH
6848 trace_enum_init();
6849
9828413d
SRRH
6850 trace_create_enum_file(d_tracer);
6851
3673b8e4
SRRH
6852#ifdef CONFIG_MODULES
6853 register_module_notifier(&trace_module_nb);
6854#endif
6855
bc0c38d1 6856#ifdef CONFIG_DYNAMIC_FTRACE
5452af66
FW
6857 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6858 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
bc0c38d1 6859#endif
b04cc6b1 6860
277ba044 6861 create_trace_instances(d_tracer);
5452af66 6862
2b6080f2 6863 create_trace_options_dir(&global_trace);
b04cc6b1 6864
09d23a1d
SRRH
6865 /* If the tracer was started via cmdline, create options for it here */
6866 if (global_trace.current_trace != &nop_trace)
6867 update_tracer_options(&global_trace, global_trace.current_trace);
6868
b5ad384e 6869 return 0;
bc0c38d1
SR
6870}
6871
3f5a54e3
SR
6872static int trace_panic_handler(struct notifier_block *this,
6873 unsigned long event, void *unused)
6874{
944ac425 6875 if (ftrace_dump_on_oops)
cecbca96 6876 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6877 return NOTIFY_OK;
6878}
6879
6880static struct notifier_block trace_panic_notifier = {
6881 .notifier_call = trace_panic_handler,
6882 .next = NULL,
6883 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6884};
6885
6886static int trace_die_handler(struct notifier_block *self,
6887 unsigned long val,
6888 void *data)
6889{
6890 switch (val) {
6891 case DIE_OOPS:
944ac425 6892 if (ftrace_dump_on_oops)
cecbca96 6893 ftrace_dump(ftrace_dump_on_oops);
3f5a54e3
SR
6894 break;
6895 default:
6896 break;
6897 }
6898 return NOTIFY_OK;
6899}
6900
6901static struct notifier_block trace_die_notifier = {
6902 .notifier_call = trace_die_handler,
6903 .priority = 200
6904};
6905
6906/*
6907 * printk is set to max of 1024, we really don't need it that big.
6908 * Nothing should be printing 1000 characters anyway.
6909 */
6910#define TRACE_MAX_PRINT 1000
6911
6912/*
6913 * Define here KERN_TRACE so that we have one place to modify
6914 * it if we decide to change what log level the ftrace dump
6915 * should be at.
6916 */
428aee14 6917#define KERN_TRACE KERN_EMERG
3f5a54e3 6918
955b61e5 6919void
3f5a54e3
SR
6920trace_printk_seq(struct trace_seq *s)
6921{
6922 /* Probably should print a warning here. */
3a161d99
SRRH
6923 if (s->seq.len >= TRACE_MAX_PRINT)
6924 s->seq.len = TRACE_MAX_PRINT;
3f5a54e3 6925
820b75f6
SRRH
6926 /*
6927 * More paranoid code. Although the buffer size is set to
6928 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
6929 * an extra layer of protection.
6930 */
6931 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
6932 s->seq.len = s->seq.size - 1;
3f5a54e3
SR
6933
6934 /* should be zero ended, but we are paranoid. */
3a161d99 6935 s->buffer[s->seq.len] = 0;
3f5a54e3
SR
6936
6937 printk(KERN_TRACE "%s", s->buffer);
6938
f9520750 6939 trace_seq_init(s);
3f5a54e3
SR
6940}
6941
955b61e5
JW
6942void trace_init_global_iter(struct trace_iterator *iter)
6943{
6944 iter->tr = &global_trace;
2b6080f2 6945 iter->trace = iter->tr->current_trace;
ae3b5093 6946 iter->cpu_file = RING_BUFFER_ALL_CPUS;
12883efb 6947 iter->trace_buffer = &global_trace.trace_buffer;
b2f974d6
CS
6948
6949 if (iter->trace && iter->trace->open)
6950 iter->trace->open(iter);
6951
6952 /* Annotate start of buffers if we had overruns */
6953 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6954 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6955
6956 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6957 if (trace_clocks[iter->tr->clock_id].in_ns)
6958 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
955b61e5
JW
6959}
6960
7fe70b57 6961void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
3f5a54e3 6962{
3f5a54e3
SR
6963 /* use static because iter can be a bit big for the stack */
6964 static struct trace_iterator iter;
7fe70b57 6965 static atomic_t dump_running;
cf586b61 6966 unsigned int old_userobj;
d769041f
SR
6967 unsigned long flags;
6968 int cnt = 0, cpu;
3f5a54e3 6969
7fe70b57
SRRH
6970 /* Only allow one dump user at a time. */
6971 if (atomic_inc_return(&dump_running) != 1) {
6972 atomic_dec(&dump_running);
6973 return;
6974 }
3f5a54e3 6975
7fe70b57
SRRH
6976 /*
6977 * Always turn off tracing when we dump.
6978 * We don't need to show trace output of what happens
6979 * between multiple crashes.
6980 *
6981 * If the user does a sysrq-z, then they can re-enable
6982 * tracing with echo 1 > tracing_on.
6983 */
0ee6b6cf 6984 tracing_off();
cf586b61 6985
7fe70b57 6986 local_irq_save(flags);
3f5a54e3 6987
38dbe0b1 6988 /* Simulate the iterator */
955b61e5
JW
6989 trace_init_global_iter(&iter);
6990
d769041f 6991 for_each_tracing_cpu(cpu) {
12883efb 6992 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
d769041f
SR
6993 }
6994
cf586b61
FW
6995 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6996
b54d3de9
TE
6997 /* don't look at user memory in panic mode */
6998 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6999
cecbca96
FW
7000 switch (oops_dump_mode) {
7001 case DUMP_ALL:
ae3b5093 7002 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7003 break;
7004 case DUMP_ORIG:
7005 iter.cpu_file = raw_smp_processor_id();
7006 break;
7007 case DUMP_NONE:
7008 goto out_enable;
7009 default:
7010 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
ae3b5093 7011 iter.cpu_file = RING_BUFFER_ALL_CPUS;
cecbca96
FW
7012 }
7013
7014 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3f5a54e3 7015
7fe70b57
SRRH
7016 /* Did function tracer already get disabled? */
7017 if (ftrace_is_dead()) {
7018 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
7019 printk("# MAY BE MISSING FUNCTION EVENTS\n");
7020 }
7021
3f5a54e3
SR
7022 /*
7023 * We need to stop all tracing on all CPUS to read the
7024 * the next buffer. This is a bit expensive, but is
7025 * not done often. We fill all what we can read,
7026 * and then release the locks again.
7027 */
7028
3f5a54e3
SR
7029 while (!trace_empty(&iter)) {
7030
7031 if (!cnt)
7032 printk(KERN_TRACE "---------------------------------\n");
7033
7034 cnt++;
7035
7036 /* reset all but tr, trace, and overruns */
7037 memset(&iter.seq, 0,
7038 sizeof(struct trace_iterator) -
7039 offsetof(struct trace_iterator, seq));
7040 iter.iter_flags |= TRACE_FILE_LAT_FMT;
7041 iter.pos = -1;
7042
955b61e5 7043 if (trace_find_next_entry_inc(&iter) != NULL) {
74e7ff8c
LJ
7044 int ret;
7045
7046 ret = print_trace_line(&iter);
7047 if (ret != TRACE_TYPE_NO_CONSUME)
7048 trace_consume(&iter);
3f5a54e3 7049 }
b892e5c8 7050 touch_nmi_watchdog();
3f5a54e3
SR
7051
7052 trace_printk_seq(&iter.seq);
7053 }
7054
7055 if (!cnt)
7056 printk(KERN_TRACE " (ftrace buffer empty)\n");
7057 else
7058 printk(KERN_TRACE "---------------------------------\n");
7059
cecbca96 7060 out_enable:
7fe70b57 7061 trace_flags |= old_userobj;
cf586b61 7062
7fe70b57
SRRH
7063 for_each_tracing_cpu(cpu) {
7064 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
cf586b61 7065 }
7fe70b57 7066 atomic_dec(&dump_running);
cd891ae0 7067 local_irq_restore(flags);
3f5a54e3 7068}
a8eecf22 7069EXPORT_SYMBOL_GPL(ftrace_dump);
cf586b61 7070
3928a8a2 7071__init static int tracer_alloc_buffers(void)
bc0c38d1 7072{
73c5162a 7073 int ring_buf_size;
9e01c1b7 7074 int ret = -ENOMEM;
4c11d7ae 7075
9e01c1b7
RR
7076 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7077 goto out;
7078
ccfe9e42 7079 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
9e01c1b7 7080 goto out_free_buffer_mask;
4c11d7ae 7081
07d777fe
SR
7082 /* Only allocate trace_printk buffers if a trace_printk exists */
7083 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
81698831 7084 /* Must be called before global_trace.buffer is allocated */
07d777fe
SR
7085 trace_printk_init_buffers();
7086
73c5162a
SR
7087 /* To save memory, keep the ring buffer size to its minimum */
7088 if (ring_buffer_expanded)
7089 ring_buf_size = trace_buf_size;
7090 else
7091 ring_buf_size = 1;
7092
9e01c1b7 7093 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
ccfe9e42 7094 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
9e01c1b7 7095
2b6080f2
SR
7096 raw_spin_lock_init(&global_trace.start_lock);
7097
2c4a33ab
SRRH
7098 /* Used for event triggers */
7099 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7100 if (!temp_buffer)
7101 goto out_free_cpumask;
7102
939c7a4f
YY
7103 if (trace_create_savedcmd() < 0)
7104 goto out_free_temp_buffer;
7105
9e01c1b7 7106 /* TODO: make the number of buffers hot pluggable with CPUS */
737223fb 7107 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
3928a8a2
SR
7108 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7109 WARN_ON(1);
939c7a4f 7110 goto out_free_savedcmd;
4c11d7ae 7111 }
a7603ff4 7112
499e5470
SR
7113 if (global_trace.buffer_disabled)
7114 tracing_off();
4c11d7ae 7115
e1e232ca
SR
7116 if (trace_boot_clock) {
7117 ret = tracing_set_clock(&global_trace, trace_boot_clock);
7118 if (ret < 0)
7119 pr_warning("Trace clock %s not defined, going back to default\n",
7120 trace_boot_clock);
7121 }
7122
ca164318
SRRH
7123 /*
7124 * register_tracer() might reference current_trace, so it
7125 * needs to be set before we register anything. This is
7126 * just a bootstrap of current_trace anyway.
7127 */
2b6080f2
SR
7128 global_trace.current_trace = &nop_trace;
7129
0b9b12c1
SRRH
7130 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7131
4104d326
SRRH
7132 ftrace_init_global_array_ops(&global_trace);
7133
ca164318
SRRH
7134 register_tracer(&nop_trace);
7135
60a11774
SR
7136 /* All seems OK, enable tracing */
7137 tracing_disabled = 0;
3928a8a2 7138
3f5a54e3
SR
7139 atomic_notifier_chain_register(&panic_notifier_list,
7140 &trace_panic_notifier);
7141
7142 register_die_notifier(&trace_die_notifier);
2fc1dfbe 7143
ae63b31e
SR
7144 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
7145
7146 INIT_LIST_HEAD(&global_trace.systems);
7147 INIT_LIST_HEAD(&global_trace.events);
7148 list_add(&global_trace.list, &ftrace_trace_arrays);
7149
7bcfaf54
SR
7150 while (trace_boot_options) {
7151 char *option;
7152
7153 option = strsep(&trace_boot_options, ",");
2b6080f2 7154 trace_set_options(&global_trace, option);
7bcfaf54
SR
7155 }
7156
77fd5c15
SRRH
7157 register_snapshot_cmd();
7158
2fc1dfbe 7159 return 0;
3f5a54e3 7160
939c7a4f
YY
7161out_free_savedcmd:
7162 free_saved_cmdlines_buffer(savedcmd);
2c4a33ab
SRRH
7163out_free_temp_buffer:
7164 ring_buffer_free(temp_buffer);
9e01c1b7 7165out_free_cpumask:
ccfe9e42 7166 free_cpumask_var(global_trace.tracing_cpumask);
9e01c1b7
RR
7167out_free_buffer_mask:
7168 free_cpumask_var(tracing_buffer_mask);
7169out:
7170 return ret;
bc0c38d1 7171}
b2821ae6 7172
5f893b26
SRRH
7173void __init trace_init(void)
7174{
0daa2302
SRRH
7175 if (tracepoint_printk) {
7176 tracepoint_print_iter =
7177 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
7178 if (WARN_ON(!tracepoint_print_iter))
7179 tracepoint_printk = 0;
7180 }
5f893b26 7181 tracer_alloc_buffers();
0c564a53 7182 trace_event_init();
5f893b26
SRRH
7183}
7184
b2821ae6
SR
7185__init static int clear_boot_tracer(void)
7186{
7187 /*
7188 * The default tracer at boot buffer is an init section.
7189 * This function is called in lateinit. If we did not
7190 * find the boot tracer, then clear it out, to prevent
7191 * later registration from accessing the buffer that is
7192 * about to be freed.
7193 */
7194 if (!default_bootup_tracer)
7195 return 0;
7196
7197 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7198 default_bootup_tracer);
7199 default_bootup_tracer = NULL;
7200
7201 return 0;
7202}
7203
8434dc93 7204fs_initcall(tracer_init_tracefs);
b2821ae6 7205late_initcall(clear_boot_tracer);